blob: bb1701f8fe453c9ab9c28f06dd0373f1a452382a [file] [log] [blame]
Elliott Hughes2faa5f12012-01-30 14:42:07 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070016
17#include "image.h"
18
Mathieu Chartier1a842962018-11-13 15:09:51 -080019#include <lz4.h>
Nicolas Geoffraybc50ac12023-02-23 14:03:55 +000020#include <lz4hc.h>
Mathieu Chartier1a842962018-11-13 15:09:51 -080021#include <sstream>
Nicolas Geoffraybc50ac12023-02-23 14:03:55 +000022#include <sys/stat.h>
23#include <zlib.h>
Mathieu Chartier1a842962018-11-13 15:09:51 -080024
Ulya Trafimovicha97cddd2022-09-28 11:22:09 +010025#include "android-base/stringprintf.h"
26
Vladimir Marko80afd022015-05-19 18:08:00 +010027#include "base/bit_utils.h"
Andreas Gampec6ea7d02017-02-01 16:46:28 -080028#include "base/length_prefixed_array.h"
David Sehrc431b9d2018-03-02 12:01:51 -080029#include "base/utils.h"
Ian Rogers4f6ad8a2013-03-18 15:27:28 -070030#include "mirror/object-inl.h"
Andreas Gampe8cf9cb382017-07-19 09:28:38 -070031#include "mirror/object_array-inl.h"
32#include "mirror/object_array.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080033
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070034namespace art {
35
// Magic bytes identifying a file as an ART image: "art\n".
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
// Image file format version; must be bumped on any layout change so stale
// images are rejected by IsValid().
// Last change: Add DexCacheSection.
const uint8_t ImageHeader::kImageVersion[] = { '1', '0', '8', '\0' };
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070039
// Constructs a header for an image being written. All addresses/offsets are
// stored as uint32_t. The image checksum starts at 0 and is filled in later
// (see WriteData()). `sections` must point to kSectionCount entries, which
// are copied into the header.
// An `oat_checksum` of 0 means there is no associated oat file, in which case
// the oat layout invariants are not checked.
ImageHeader::ImageHeader(uint32_t image_reservation_size,
                         uint32_t component_count,
                         uint32_t image_begin,
                         uint32_t image_size,
                         ImageSection* sections,
                         uint32_t image_roots,
                         uint32_t oat_checksum,
                         uint32_t oat_file_begin,
                         uint32_t oat_data_begin,
                         uint32_t oat_data_end,
                         uint32_t oat_file_end,
                         uint32_t boot_image_begin,
                         uint32_t boot_image_size,
                         uint32_t boot_image_component_count,
                         uint32_t boot_image_checksum,
                         uint32_t pointer_size)
    : image_reservation_size_(image_reservation_size),
      component_count_(component_count),
      image_begin_(image_begin),
      image_size_(image_size),
      image_checksum_(0u),
      oat_checksum_(oat_checksum),
      oat_file_begin_(oat_file_begin),
      oat_data_begin_(oat_data_begin),
      oat_data_end_(oat_data_end),
      oat_file_end_(oat_file_end),
      boot_image_begin_(boot_image_begin),
      boot_image_size_(boot_image_size),
      boot_image_component_count_(boot_image_component_count),
      boot_image_checksum_(boot_image_checksum),
      image_roots_(image_roots),
      pointer_size_(pointer_size) {
  // The image itself must start on a page boundary.
  CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
  // Only validate the oat layout when an oat file actually exists.
  if (oat_checksum != 0u) {
    CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
    CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
    // The image (and its roots) must lie entirely below the oat file, and the
    // oat data range must nest inside the oat file range.
    CHECK_LT(image_roots, oat_file_begin);
    CHECK_LE(oat_file_begin, oat_data_begin);
    CHECK_LT(oat_data_begin, oat_data_end);
    CHECK_LE(oat_data_end, oat_file_end);
  }
  CHECK(ValidPointerSize(pointer_size_)) << pointer_size_;
  memcpy(magic_, kImageMagic, sizeof(kImageMagic));
  memcpy(version_, kImageVersion, sizeof(kImageVersion));
  std::copy_n(sections, kSectionCount, sections_);
}
86
Vladimir Markoc0b30c92019-07-23 14:58:25 +010087void ImageHeader::RelocateImageReferences(int64_t delta) {
88 CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
Alex Light53cb16b2014-06-12 11:26:29 -070089 oat_file_begin_ += delta;
90 oat_data_begin_ += delta;
91 oat_data_end_ += delta;
92 oat_file_end_ += delta;
Mathieu Chartierfbc31082016-01-24 11:59:56 -080093 image_begin_ += delta;
94 image_roots_ += delta;
95}
96
Vladimir Markoc0b30c92019-07-23 14:58:25 +010097void ImageHeader::RelocateBootImageReferences(int64_t delta) {
98 CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
99 DCHECK_EQ(boot_image_begin_ != 0u, boot_image_size_ != 0u);
100 if (boot_image_begin_ != 0u) {
101 boot_image_begin_ += delta;
102 }
Mathieu Chartiere401d142015-04-22 13:56:20 -0700103 for (size_t i = 0; i < kImageMethodsCount; ++i) {
104 image_methods_[i] += delta;
105 }
Alex Light53cb16b2014-06-12 11:26:29 -0700106}
107
Vladimir Marko21910692019-11-06 13:27:03 +0000108bool ImageHeader::IsAppImage() const {
109 // Unlike boot image and boot image extensions which include address space for
110 // oat files in their reservation size, app images are loaded separately from oat
111 // files and their reservation size is the image size rounded up to full page.
112 return image_reservation_size_ == RoundUp(image_size_, kPageSize);
113}
114
Vladimir Markod0036ac2019-11-21 11:47:12 +0000115uint32_t ImageHeader::GetImageSpaceCount() const {
116 DCHECK(!IsAppImage());
117 DCHECK_NE(component_count_, 0u); // Must be the header for the first component.
118 // For images compiled with --single-image, there is only one oat file. To detect
119 // that, check whether the reservation ends at the end of the first oat file.
120 return (image_begin_ + image_reservation_size_ == oat_file_end_) ? 1u : component_count_;
121}
122
Brian Carlstrom68708f52013-09-03 14:15:31 -0700123bool ImageHeader::IsValid() const {
124 if (memcmp(magic_, kImageMagic, sizeof(kImageMagic)) != 0) {
125 return false;
126 }
127 if (memcmp(version_, kImageVersion, sizeof(kImageVersion)) != 0) {
128 return false;
129 }
Vladimir Marko7391c8c2018-11-21 17:58:44 +0000130 if (!IsAligned<kPageSize>(image_reservation_size_)) {
131 return false;
132 }
Alex Light53cb16b2014-06-12 11:26:29 -0700133 // Unsigned so wraparound is well defined.
134 if (image_begin_ >= image_begin_ + image_size_) {
135 return false;
136 }
Nicolas Geoffray5d3a23d2022-12-08 14:51:25 +0000137 if (oat_checksum_ != 0u) {
138 if (oat_file_begin_ > oat_file_end_) {
139 return false;
140 }
141 if (oat_data_begin_ > oat_data_end_) {
142 return false;
143 }
144 if (oat_file_begin_ >= oat_data_begin_) {
145 return false;
146 }
Alex Light53cb16b2014-06-12 11:26:29 -0700147 }
Brian Carlstrom68708f52013-09-03 14:15:31 -0700148 return true;
149}
150
151const char* ImageHeader::GetMagic() const {
152 CHECK(IsValid());
153 return reinterpret_cast<const char*>(magic_);
154}
155
Mathieu Chartiere401d142015-04-22 13:56:20 -0700156ArtMethod* ImageHeader::GetImageMethod(ImageMethod index) const {
157 CHECK_LT(static_cast<size_t>(index), kImageMethodsCount);
158 return reinterpret_cast<ArtMethod*>(image_methods_[index]);
159}
160
Mathieu Chartiere401d142015-04-22 13:56:20 -0700161std::ostream& operator<<(std::ostream& os, const ImageSection& section) {
162 return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
163}
164
David Sehra49e0532017-08-25 08:05:29 -0700165void ImageHeader::VisitObjects(ObjectVisitor* visitor,
166 uint8_t* base,
167 PointerSize pointer_size) const {
168 DCHECK_EQ(pointer_size, GetPointerSize());
169 const ImageSection& objects = GetObjectsSection();
170 static const size_t kStartPos = RoundUp(sizeof(ImageHeader), kObjectAlignment);
171 for (size_t pos = kStartPos; pos < objects.Size(); ) {
172 mirror::Object* object = reinterpret_cast<mirror::Object*>(base + objects.Offset() + pos);
173 visitor->Visit(object);
174 pos += RoundUp(object->SizeOf(), kObjectAlignment);
175 }
176}
177
Andreas Gampebda1d602016-08-29 17:43:45 -0700178PointerSize ImageHeader::GetPointerSize() const {
179 return ConvertToPointerSize(pointer_size_);
180}
181
Ulya Trafimovicha97cddd2022-09-28 11:22:09 +0100182bool LZ4_decompress_safe_checked(const char* source,
183 char* dest,
184 int compressed_size,
185 int max_decompressed_size,
186 /*out*/ size_t* decompressed_size_checked,
187 /*out*/ std::string* error_msg) {
188 int decompressed_size = LZ4_decompress_safe(source, dest, compressed_size, max_decompressed_size);
189 if (UNLIKELY(decompressed_size < 0)) {
190 *error_msg = android::base::StringPrintf("LZ4_decompress_safe() returned negative size: %d",
191 decompressed_size);
192 return false;
193 } else {
194 *decompressed_size_checked = static_cast<size_t>(decompressed_size);
195 return true;
196 }
197}
198
// Materializes this block into the mapped image: copies (uncompressed mode)
// or LZ4-decompresses (LZ4/LZ4HC modes) data_size_ bytes at in_ptr+data_offset_
// into out_ptr+image_offset_. Returns false on failure, filling `error_msg`
// when it is non-null.
bool ImageHeader::Block::Decompress(uint8_t* out_ptr,
                                    const uint8_t* in_ptr,
                                    std::string* error_msg) const {
  switch (storage_mode_) {
    case kStorageModeUncompressed: {
      // Stored verbatim, so on-disk and in-image sizes must agree.
      CHECK_EQ(image_size_, data_size_);
      memcpy(out_ptr + image_offset_, in_ptr + data_offset_, data_size_);
      break;
    }
    case kStorageModeLZ4:
    case kStorageModeLZ4HC: {
      // LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
      size_t decompressed_size;
      bool ok = LZ4_decompress_safe_checked(
          reinterpret_cast<const char*>(in_ptr) + data_offset_,
          reinterpret_cast<char*>(out_ptr) + image_offset_,
          data_size_,
          image_size_,
          &decompressed_size,
          error_msg);
      if (!ok) {
        return false;
      }
      // A successful decompression must produce exactly the recorded size.
      CHECK_EQ(decompressed_size, image_size_);
      break;
    }
    default: {
      // Unknown storage mode; error_msg is optional, so guard the write.
      if (error_msg != nullptr) {
        *error_msg = (std::ostringstream() << "Invalid image format " << storage_mode_).str();
      }
      return false;
    }
  }
  return true;
}
234
// Returns a human-readable name for the given image section, or nullptr for
// the kSectionCount sentinel. There is deliberately no default case so the
// compiler flags any enumerator added without a name here.
const char* ImageHeader::GetImageSectionName(ImageSections index) {
  switch (index) {
    case kSectionObjects: return "Objects";
    case kSectionArtFields: return "ArtFields";
    case kSectionArtMethods: return "ArtMethods";
    case kSectionRuntimeMethods: return "RuntimeMethods";
    case kSectionImTables: return "ImTables";
    case kSectionIMTConflictTables: return "IMTConflictTables";
    case kSectionInternedStrings: return "InternedStrings";
    case kSectionClassTable: return "ClassTable";
    case kSectionStringReferenceOffsets: return "StringReferenceOffsets";
    case kSectionDexCacheArrays: return "DexCacheArrays";
    case kSectionMetadata: return "Metadata";
    case kSectionImageBitmap: return "ImageBitmap";
    case kSectionCount: return nullptr;
  }
}
252
Nicolas Geoffraybc50ac12023-02-23 14:03:55 +0000253// If `image_storage_mode` is compressed, compress data from `source`
254// into `storage`, and return an array pointing to the compressed.
255// If the mode is uncompressed, just return an array pointing to `source`.
256static ArrayRef<const uint8_t> MaybeCompressData(ArrayRef<const uint8_t> source,
257 ImageHeader::StorageMode image_storage_mode,
258 /*out*/ dchecked_vector<uint8_t>* storage) {
259 const uint64_t compress_start_time = NanoTime();
260
261 switch (image_storage_mode) {
262 case ImageHeader::kStorageModeLZ4: {
263 storage->resize(LZ4_compressBound(source.size()));
264 size_t data_size = LZ4_compress_default(
265 reinterpret_cast<char*>(const_cast<uint8_t*>(source.data())),
266 reinterpret_cast<char*>(storage->data()),
267 source.size(),
268 storage->size());
269 storage->resize(data_size);
270 break;
271 }
272 case ImageHeader::kStorageModeLZ4HC: {
273 // Bound is same as non HC.
274 storage->resize(LZ4_compressBound(source.size()));
275 size_t data_size = LZ4_compress_HC(
276 reinterpret_cast<const char*>(const_cast<uint8_t*>(source.data())),
277 reinterpret_cast<char*>(storage->data()),
278 source.size(),
279 storage->size(),
280 LZ4HC_CLEVEL_MAX);
281 storage->resize(data_size);
282 break;
283 }
284 case ImageHeader::kStorageModeUncompressed: {
285 return source;
286 }
287 default: {
288 LOG(FATAL) << "Unsupported";
289 UNREACHABLE();
290 }
291 }
292
293 DCHECK(image_storage_mode == ImageHeader::kStorageModeLZ4 ||
294 image_storage_mode == ImageHeader::kStorageModeLZ4HC);
295 VLOG(image) << "Compressed from " << source.size() << " to " << storage->size() << " in "
296 << PrettyDuration(NanoTime() - compress_start_time);
297 if (kIsDebugBuild) {
298 dchecked_vector<uint8_t> decompressed(source.size());
299 size_t decompressed_size;
300 std::string error_msg;
301 bool ok = LZ4_decompress_safe_checked(
302 reinterpret_cast<char*>(storage->data()),
303 reinterpret_cast<char*>(decompressed.data()),
304 storage->size(),
305 decompressed.size(),
306 &decompressed_size,
307 &error_msg);
308 if (!ok) {
309 LOG(FATAL) << error_msg;
310 UNREACHABLE();
311 }
312 CHECK_EQ(decompressed_size, decompressed.size());
313 CHECK_EQ(memcmp(source.data(), decompressed.data(), source.size()), 0) << image_storage_mode;
314 }
315 return ArrayRef<const uint8_t>(*storage);
316}
317
// Writes the image payload to `image_file`: the (possibly compressed) data
// blocks, the block table (compressed mode only), and the image bitmap.
// `data` points at the in-memory image (starting with this header); the
// header itself is not written here — callers write it separately, so this
// method only updates header fields (blocks_offset_/count_, data_size_, the
// bitmap section, and optionally the checksum).
bool ImageHeader::WriteData(const ImageFileGuard& image_file,
                            const uint8_t* data,
                            const uint8_t* bitmap_data,
                            ImageHeader::StorageMode image_storage_mode,
                            uint32_t max_image_block_size,
                            bool update_checksum,
                            std::string* error_msg) {
  const bool is_compressed = image_storage_mode != ImageHeader::kStorageModeUncompressed;
  // (source offset, size) pairs describing the uncompressed input ranges.
  dchecked_vector<std::pair<uint32_t, uint32_t>> block_sources;
  dchecked_vector<ImageHeader::Block> blocks;

  // Add a set of solid blocks such that no block is larger than the maximum size. A solid block
  // is a block that must be decompressed all at once.
  auto add_blocks = [&](uint32_t offset, uint32_t size) {
    while (size != 0u) {
      const uint32_t cur_size = std::min(size, max_image_block_size);
      block_sources.emplace_back(offset, cur_size);
      offset += cur_size;
      size -= cur_size;
    }
  };

  // Everything after the header is split into blocks.
  add_blocks(sizeof(ImageHeader), this->GetImageSize() - sizeof(ImageHeader));

  // Checksum of compressed image data and header.
  uint32_t image_checksum = 0u;
  if (update_checksum) {
    image_checksum = adler32(0L, Z_NULL, 0);
    image_checksum = adler32(image_checksum,
                             reinterpret_cast<const uint8_t*>(this),
                             sizeof(ImageHeader));
  }

  // Copy and compress blocks.
  uint32_t out_offset = sizeof(ImageHeader);
  for (const std::pair<uint32_t, uint32_t> block : block_sources) {
    ArrayRef<const uint8_t> raw_image_data(data + block.first, block.second);
    dchecked_vector<uint8_t> compressed_data;
    ArrayRef<const uint8_t> image_data =
        MaybeCompressData(raw_image_data, image_storage_mode, &compressed_data);

    if (!is_compressed) {
      // For uncompressed, preserve alignment since the image will be directly mapped.
      out_offset = block.first;
    }

    // Fill in the compressed location of the block.
    blocks.emplace_back(ImageHeader::Block(
        image_storage_mode,
        /*data_offset=*/ out_offset,
        /*data_size=*/ image_data.size(),
        /*image_offset=*/ block.first,
        /*image_size=*/ block.second));

    if (!image_file->PwriteFully(image_data.data(), image_data.size(), out_offset)) {
      *error_msg = "Failed to write image file data " +
          image_file->GetPath() + ": " + std::string(strerror(errno));
      return false;
    }
    out_offset += image_data.size();
    if (update_checksum) {
      // Checksum covers the data as written (i.e. post-compression bytes).
      image_checksum = adler32(image_checksum, image_data.data(), image_data.size());
    }
  }

  if (is_compressed) {
    // The block table follows the data so the loader can find each
    // compressed block.
    // Align up since the compressed data is not necessarily aligned.
    out_offset = RoundUp(out_offset, alignof(ImageHeader::Block));
    CHECK(!blocks.empty());
    const size_t blocks_bytes = blocks.size() * sizeof(blocks[0]);
    if (!image_file->PwriteFully(&blocks[0], blocks_bytes, out_offset)) {
      *error_msg = "Failed to write image blocks " +
          image_file->GetPath() + ": " + std::string(strerror(errno));
      return false;
    }
    this->blocks_offset_ = out_offset;
    this->blocks_count_ = blocks.size();
    out_offset += blocks_bytes;
  }

  // Data size includes everything except the bitmap.
  this->data_size_ = out_offset - sizeof(ImageHeader);

  // Update and write the bitmap section. Note that the bitmap section is relative to the
  // possibly compressed image.
  ImageSection& bitmap_section = GetImageSection(ImageHeader::kSectionImageBitmap);
  // Align up since data size may be unaligned if the image is compressed.
  out_offset = RoundUp(out_offset, kPageSize);
  bitmap_section = ImageSection(out_offset, bitmap_section.Size());

  if (!image_file->PwriteFully(bitmap_data,
                               bitmap_section.Size(),
                               bitmap_section.Offset())) {
    *error_msg = "Failed to write image file bitmap " +
        image_file->GetPath() + ": " + std::string(strerror(errno));
    return false;
  }

  int err = image_file->Flush();
  if (err < 0) {
    *error_msg = "Failed to flush image file " + image_file->GetPath() + ": " + std::to_string(err);
    return false;
  }

  if (update_checksum) {
    // Calculate the image checksum of the remaining data.
    // NOTE(review): this resumes from the stored member (GetImageChecksum())
    // rather than the local `image_checksum` accumulated over the header and
    // data above — confirm the member is set to that value elsewhere before
    // this point, otherwise the header+data contribution is discarded.
    image_checksum = adler32(GetImageChecksum(),
                             reinterpret_cast<const uint8_t*>(bitmap_data),
                             bitmap_section.Size());
    this->SetImageChecksum(image_checksum);
  }

  if (VLOG_IS_ON(image)) {
    // The bitmap is written outside the (possibly compressed) data stream, so
    // add it to both totals for an apples-to-apples comparison.
    const size_t separately_written_section_size = bitmap_section.Size();
    const size_t total_uncompressed_size = image_size_ + separately_written_section_size;
    const size_t total_compressed_size = out_offset + separately_written_section_size;

    VLOG(compiler) << "UncompressedImageSize = " << total_uncompressed_size;
    if (total_uncompressed_size != total_compressed_size) {
      VLOG(compiler) << "CompressedImageSize = " << total_compressed_size;
    }
  }

  DCHECK_EQ(bitmap_section.End(), static_cast<size_t>(image_file->GetLength()))
      << "Bitmap should be at the end of the file";
  return true;
}
445
Brian Carlstrom4a289ed2011-08-16 17:17:49 -0700446} // namespace art