/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "image.h"

#include <lz4.h>
#include <lz4hc.h>
#include <sstream>
#include <sys/stat.h>
#include <zlib.h>

#include "android-base/stringprintf.h"

#include "base/bit_utils.h"
#include "base/length_prefixed_array.h"
#include "base/utils.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object_array.h"

namespace art {

const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
// Last change: Add DexCacheSection.
const uint8_t ImageHeader::kImageVersion[] = { '1', '0', '8', '\0' };

ImageHeader::ImageHeader(uint32_t image_reservation_size,
                         uint32_t component_count,
                         uint32_t image_begin,
                         uint32_t image_size,
                         ImageSection* sections,
                         uint32_t image_roots,
                         uint32_t oat_checksum,
                         uint32_t oat_file_begin,
                         uint32_t oat_data_begin,
                         uint32_t oat_data_end,
                         uint32_t oat_file_end,
                         uint32_t boot_image_begin,
                         uint32_t boot_image_size,
                         uint32_t boot_image_component_count,
                         uint32_t boot_image_checksum,
                         uint32_t pointer_size)
    : image_reservation_size_(image_reservation_size),
      component_count_(component_count),
      image_begin_(image_begin),
      image_size_(image_size),
      image_checksum_(0u),
      oat_checksum_(oat_checksum),
      oat_file_begin_(oat_file_begin),
      oat_data_begin_(oat_data_begin),
      oat_data_end_(oat_data_end),
      oat_file_end_(oat_file_end),
      boot_image_begin_(boot_image_begin),
      boot_image_size_(boot_image_size),
      boot_image_component_count_(boot_image_component_count),
      boot_image_checksum_(boot_image_checksum),
      image_roots_(image_roots),
      pointer_size_(pointer_size) {
  CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
  if (oat_checksum != 0u) {
    CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
    CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
    CHECK_LT(image_roots, oat_file_begin);
    CHECK_LE(oat_file_begin, oat_data_begin);
    CHECK_LT(oat_data_begin, oat_data_end);
    CHECK_LE(oat_data_end, oat_file_end);
  }
  CHECK(ValidPointerSize(pointer_size_)) << pointer_size_;
  memcpy(magic_, kImageMagic, sizeof(kImageMagic));
  memcpy(version_, kImageVersion, sizeof(kImageVersion));
  std::copy_n(sections, kSectionCount, sections_);
}

void ImageHeader::RelocateImageReferences(int64_t delta) {
  CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
  oat_file_begin_ += delta;
  oat_data_begin_ += delta;
  oat_data_end_ += delta;
  oat_file_end_ += delta;
  image_begin_ += delta;
  image_roots_ += delta;
}

void ImageHeader::RelocateBootImageReferences(int64_t delta) {
  CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
  DCHECK_EQ(boot_image_begin_ != 0u, boot_image_size_ != 0u);
  if (boot_image_begin_ != 0u) {
    boot_image_begin_ += delta;
  }
  for (size_t i = 0; i < kImageMethodsCount; ++i) {
    image_methods_[i] += delta;
  }
}
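
// Example with hypothetical addresses: an image compiled to load at 0x70000000 but
// mapped at 0x71000000 is relocated with delta = 0x1000000 (page aligned); every
// absolute address recorded above shifts by that amount.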

bool ImageHeader::IsAppImage() const {
  // Unlike boot images and boot image extensions, which include address space for
  // oat files in their reservation size, app images are loaded separately from oat
  // files and their reservation size is the image size rounded up to a full page.
  return image_reservation_size_ == RoundUp(image_size_, kPageSize);
}

uint32_t ImageHeader::GetImageSpaceCount() const {
  DCHECK(!IsAppImage());
  DCHECK_NE(component_count_, 0u);  // Must be the header for the first component.
  // For images compiled with --single-image, there is only one oat file. To detect
  // that, check whether the reservation ends at the end of the first oat file.
  return (image_begin_ + image_reservation_size_ == oat_file_end_) ? 1u : component_count_;
}
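
// Illustration with hypothetical addresses: if a two-component boot image reserves
// [0x70000000, 0x78000000) and its first oat file already ends at 0x78000000, the
// whole reservation is backed by a single oat file (--single-image), so there is
// one image space; otherwise there is one image space per component.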

bool ImageHeader::IsValid() const {
  if (memcmp(magic_, kImageMagic, sizeof(kImageMagic)) != 0) {
    return false;
  }
  if (memcmp(version_, kImageVersion, sizeof(kImageVersion)) != 0) {
    return false;
  }
  if (!IsAligned<kPageSize>(image_reservation_size_)) {
    return false;
  }
  // Unsigned so wraparound is well defined.
  if (image_begin_ >= image_begin_ + image_size_) {
    return false;
  }
  if (oat_checksum_ != 0u) {
    if (oat_file_begin_ > oat_file_end_) {
      return false;
    }
    if (oat_data_begin_ > oat_data_end_) {
      return false;
    }
    if (oat_file_begin_ >= oat_data_begin_) {
      return false;
    }
  }
  return true;
}

const char* ImageHeader::GetMagic() const {
  CHECK(IsValid());
  return reinterpret_cast<const char*>(magic_);
}

ArtMethod* ImageHeader::GetImageMethod(ImageMethod index) const {
  CHECK_LT(static_cast<size_t>(index), kImageMethodsCount);
  return reinterpret_cast<ArtMethod*>(image_methods_[index]);
}

std::ostream& operator<<(std::ostream& os, const ImageSection& section) {
  return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
}

void ImageHeader::VisitObjects(ObjectVisitor* visitor,
                               uint8_t* base,
                               PointerSize pointer_size) const {
  DCHECK_EQ(pointer_size, GetPointerSize());
  const ImageSection& objects = GetObjectsSection();
  static const size_t kStartPos = RoundUp(sizeof(ImageHeader), kObjectAlignment);
  for (size_t pos = kStartPos; pos < objects.Size(); ) {
    mirror::Object* object = reinterpret_cast<mirror::Object*>(base + objects.Offset() + pos);
    visitor->Visit(object);
    pos += RoundUp(object->SizeOf(), kObjectAlignment);
  }
}
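
// Minimal usage sketch (hypothetical visitor; assumes ObjectVisitor exposes the
// virtual Visit(mirror::Object*) used above):
//
//   class CountingVisitor : public ObjectVisitor {
//    public:
//     void Visit(mirror::Object* obj) override { ++count; }
//     size_t count = 0;
//   };
//
//   CountingVisitor v;
//   header.VisitObjects(&v, image_base, header.GetPointerSize());
//
// The walk starts at RoundUp(sizeof(ImageHeader), kObjectAlignment) because the
// ImageHeader itself occupies the start of the objects section.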

PointerSize ImageHeader::GetPointerSize() const {
  return ConvertToPointerSize(pointer_size_);
}
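
// Wraps LZ4_decompress_safe(), converting its negative error return into a `false`
// result with a descriptive `error_msg` and reporting the actual decompressed byte
// count through `decompressed_size_checked` on success.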
bool LZ4_decompress_safe_checked(const char* source,
                                 char* dest,
                                 int compressed_size,
                                 int max_decompressed_size,
                                 /*out*/ size_t* decompressed_size_checked,
                                 /*out*/ std::string* error_msg) {
  int decompressed_size = LZ4_decompress_safe(source, dest, compressed_size, max_decompressed_size);
  if (UNLIKELY(decompressed_size < 0)) {
    *error_msg = android::base::StringPrintf("LZ4_decompress_safe() returned negative size: %d",
                                             decompressed_size);
    return false;
  } else {
    *decompressed_size_checked = static_cast<size_t>(decompressed_size);
    return true;
  }
}

bool ImageHeader::Block::Decompress(uint8_t* out_ptr,
                                    const uint8_t* in_ptr,
                                    std::string* error_msg) const {
  switch (storage_mode_) {
    case kStorageModeUncompressed: {
      CHECK_EQ(image_size_, data_size_);
      memcpy(out_ptr + image_offset_, in_ptr + data_offset_, data_size_);
      break;
    }
    case kStorageModeLZ4:
    case kStorageModeLZ4HC: {
      // LZ4HC and LZ4 share the same compressed format; both are decoded with
      // LZ4_decompress_safe.
      size_t decompressed_size;
      bool ok = LZ4_decompress_safe_checked(
          reinterpret_cast<const char*>(in_ptr) + data_offset_,
          reinterpret_cast<char*>(out_ptr) + image_offset_,
          data_size_,
          image_size_,
          &decompressed_size,
          error_msg);
      if (!ok) {
        return false;
      }
      CHECK_EQ(decompressed_size, image_size_);
      break;
    }
    default: {
      if (error_msg != nullptr) {
        *error_msg = (std::ostringstream() << "Invalid image format " << storage_mode_).str();
      }
      return false;
    }
  }
  return true;
}
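
// How a Block's fields map between the two address spaces used by Decompress():
// `data_offset_`/`data_size_` locate the stored (possibly compressed) bytes within
// the image file, while `image_offset_`/`image_size_` locate the decompressed bytes
// within the mapped image. For kStorageModeUncompressed the two sizes must be equal.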

const char* ImageHeader::GetImageSectionName(ImageSections index) {
  switch (index) {
    case kSectionObjects: return "Objects";
    case kSectionArtFields: return "ArtFields";
    case kSectionArtMethods: return "ArtMethods";
    case kSectionRuntimeMethods: return "RuntimeMethods";
    case kSectionImTables: return "ImTables";
    case kSectionIMTConflictTables: return "IMTConflictTables";
    case kSectionInternedStrings: return "InternedStrings";
    case kSectionClassTable: return "ClassTable";
    case kSectionStringReferenceOffsets: return "StringReferenceOffsets";
    case kSectionDexCacheArrays: return "DexCacheArrays";
    case kSectionMetadata: return "Metadata";
    case kSectionImageBitmap: return "ImageBitmap";
    case kSectionCount: return nullptr;
  }
}

// If `image_storage_mode` is compressed, compress data from `source`
// into `storage`, and return an array pointing to the compressed data.
// If the mode is uncompressed, just return an array pointing to `source`.
static ArrayRef<const uint8_t> MaybeCompressData(ArrayRef<const uint8_t> source,
                                                 ImageHeader::StorageMode image_storage_mode,
                                                 /*out*/ dchecked_vector<uint8_t>* storage) {
  const uint64_t compress_start_time = NanoTime();

  switch (image_storage_mode) {
    case ImageHeader::kStorageModeLZ4: {
      storage->resize(LZ4_compressBound(source.size()));
      size_t data_size = LZ4_compress_default(
          reinterpret_cast<char*>(const_cast<uint8_t*>(source.data())),
          reinterpret_cast<char*>(storage->data()),
          source.size(),
          storage->size());
      storage->resize(data_size);
      break;
    }
    case ImageHeader::kStorageModeLZ4HC: {
      // The worst-case compressed size bound is the same as for non-HC LZ4.
      storage->resize(LZ4_compressBound(source.size()));
      size_t data_size = LZ4_compress_HC(
          reinterpret_cast<const char*>(const_cast<uint8_t*>(source.data())),
          reinterpret_cast<char*>(storage->data()),
          source.size(),
          storage->size(),
          LZ4HC_CLEVEL_MAX);
      storage->resize(data_size);
      break;
    }
    case ImageHeader::kStorageModeUncompressed: {
      return source;
    }
    default: {
      LOG(FATAL) << "Unsupported";
      UNREACHABLE();
    }
  }

  DCHECK(image_storage_mode == ImageHeader::kStorageModeLZ4 ||
         image_storage_mode == ImageHeader::kStorageModeLZ4HC);
  VLOG(image) << "Compressed from " << source.size() << " to " << storage->size() << " in "
              << PrettyDuration(NanoTime() - compress_start_time);
  if (kIsDebugBuild) {
    // In debug builds, round-trip the compressed bytes and verify that they
    // decompress back to the exact input.
    dchecked_vector<uint8_t> decompressed(source.size());
    size_t decompressed_size;
    std::string error_msg;
    bool ok = LZ4_decompress_safe_checked(
        reinterpret_cast<char*>(storage->data()),
        reinterpret_cast<char*>(decompressed.data()),
        storage->size(),
        decompressed.size(),
        &decompressed_size,
        &error_msg);
    if (!ok) {
      LOG(FATAL) << error_msg;
      UNREACHABLE();
    }
    CHECK_EQ(decompressed_size, decompressed.size());
    CHECK_EQ(memcmp(source.data(), decompressed.data(), source.size()), 0) << image_storage_mode;
  }
  return ArrayRef<const uint8_t>(*storage);
}
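
// On-disk layout produced by WriteData() below (a sketch; offsets are file offsets):
//
//   [ ImageHeader | block 0 data | block 1 data | ... | Block descriptors
//     (compressed images only, aligned to alignof(Block)) | padding | bitmap
//     (page aligned) ]
//
// For uncompressed images, each block is instead written at its image offset so the
// file can be mapped directly. The adler32 image checksum covers the header, the
// (possibly compressed) block data, and the bitmap, but not the block descriptors.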
bool ImageHeader::WriteData(const ImageFileGuard& image_file,
                            const uint8_t* data,
                            const uint8_t* bitmap_data,
                            ImageHeader::StorageMode image_storage_mode,
                            uint32_t max_image_block_size,
                            bool update_checksum,
                            std::string* error_msg) {
  const bool is_compressed = image_storage_mode != ImageHeader::kStorageModeUncompressed;
  dchecked_vector<std::pair<uint32_t, uint32_t>> block_sources;
  dchecked_vector<ImageHeader::Block> blocks;

  // Add a set of solid blocks such that no block is larger than the maximum size. A solid block
  // is a block that must be decompressed all at once.
  auto add_blocks = [&](uint32_t offset, uint32_t size) {
    while (size != 0u) {
      const uint32_t cur_size = std::min(size, max_image_block_size);
      block_sources.emplace_back(offset, cur_size);
      offset += cur_size;
      size -= cur_size;
    }
  };

  add_blocks(sizeof(ImageHeader), this->GetImageSize() - sizeof(ImageHeader));

  // Checksum of compressed image data and header.
  uint32_t image_checksum = 0u;
  if (update_checksum) {
    image_checksum = adler32(0L, Z_NULL, 0);
    image_checksum = adler32(image_checksum,
                             reinterpret_cast<const uint8_t*>(this),
                             sizeof(ImageHeader));
  }

  // Copy and compress blocks.
  uint32_t out_offset = sizeof(ImageHeader);
  for (const std::pair<uint32_t, uint32_t> block : block_sources) {
    ArrayRef<const uint8_t> raw_image_data(data + block.first, block.second);
    dchecked_vector<uint8_t> compressed_data;
    ArrayRef<const uint8_t> image_data =
        MaybeCompressData(raw_image_data, image_storage_mode, &compressed_data);

    if (!is_compressed) {
      // For uncompressed, preserve alignment since the image will be directly mapped.
      out_offset = block.first;
    }

    // Fill in the compressed location of the block.
    blocks.emplace_back(ImageHeader::Block(
        image_storage_mode,
        /*data_offset=*/ out_offset,
        /*data_size=*/ image_data.size(),
        /*image_offset=*/ block.first,
        /*image_size=*/ block.second));

    if (!image_file->PwriteFully(image_data.data(), image_data.size(), out_offset)) {
      *error_msg = "Failed to write image file data " +
          image_file->GetPath() + ": " + std::string(strerror(errno));
      return false;
    }
    out_offset += image_data.size();
    if (update_checksum) {
      image_checksum = adler32(image_checksum, image_data.data(), image_data.size());
    }
  }

  if (is_compressed) {
    // Align up since the compressed data is not necessarily aligned.
    out_offset = RoundUp(out_offset, alignof(ImageHeader::Block));
    CHECK(!blocks.empty());
    const size_t blocks_bytes = blocks.size() * sizeof(blocks[0]);
    if (!image_file->PwriteFully(&blocks[0], blocks_bytes, out_offset)) {
      *error_msg = "Failed to write image blocks " +
          image_file->GetPath() + ": " + std::string(strerror(errno));
      return false;
    }
    this->blocks_offset_ = out_offset;
    this->blocks_count_ = blocks.size();
    out_offset += blocks_bytes;
  }

  // Data size includes everything except the bitmap.
  this->data_size_ = out_offset - sizeof(ImageHeader);

  // Update and write the bitmap section. Note that the bitmap section is relative to the
  // possibly compressed image.
  ImageSection& bitmap_section = GetImageSection(ImageHeader::kSectionImageBitmap);
  // Align up since data size may be unaligned if the image is compressed.
  out_offset = RoundUp(out_offset, kPageSize);
  bitmap_section = ImageSection(out_offset, bitmap_section.Size());

  if (!image_file->PwriteFully(bitmap_data,
                               bitmap_section.Size(),
                               bitmap_section.Offset())) {
    *error_msg = "Failed to write image file bitmap " +
        image_file->GetPath() + ": " + std::string(strerror(errno));
    return false;
  }

  int err = image_file->Flush();
  if (err < 0) {
    *error_msg = "Failed to flush image file " + image_file->GetPath() + ": " + std::to_string(err);
    return false;
  }

  if (update_checksum) {
    // Calculate the image checksum of the remaining data.
    image_checksum = adler32(GetImageChecksum(),
                             reinterpret_cast<const uint8_t*>(bitmap_data),
                             bitmap_section.Size());
    this->SetImageChecksum(image_checksum);
  }

  if (VLOG_IS_ON(image)) {
    const size_t separately_written_section_size = bitmap_section.Size();
    const size_t total_uncompressed_size = image_size_ + separately_written_section_size;
    const size_t total_compressed_size = out_offset + separately_written_section_size;

    VLOG(compiler) << "UncompressedImageSize = " << total_uncompressed_size;
    if (total_uncompressed_size != total_compressed_size) {
      VLOG(compiler) << "CompressedImageSize = " << total_compressed_size;
    }
  }

  DCHECK_EQ(bitmap_section.End(), static_cast<size_t>(image_file->GetLength()))
      << "Bitmap should be at the end of the file";
  return true;
}

}  // namespace art