update_engine: Deprecate major version 1

Chrome OS has moved away from major payload version 1 and already has a
stepping stone for the migration in M53. This cleanup removes the
remaining major version 1 code paths, which makes the code much easier
to understand.
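
For reference, all remaining code assumes the major version 2 payload
header layout (24 bytes total, reconstructed from the constants touched
below):

  offset  0: magic "CrAU"             (4 bytes)
  offset  4: major payload version    (8 bytes, big endian)
  offset 12: manifest size            (8 bytes, big endian)
  offset 20: metadata signature size  (4 bytes, big endian)

The version 1 only manifest fields (old/new_kernel_info,
old/new_rootfs_info, install_operations, kernel_install_operations) are
now rejected; the equivalent information lives in the partitions list.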

BUG=chromium:1008553
TEST=FEATURES="test" sudo emerge update_engine update_payload
TEST=cros_generate_update_payload --image chromiumos_test_image.bin --check --output delta.bin

Change-Id: I01815dfa5fdf395f8214ef162e01ecca2d42f7fc
Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1857459
Tested-by: Amin Hassani <[email protected]>
Reviewed-by: Sen Jiang <[email protected]>
Commit-Queue: Amin Hassani <[email protected]>
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index cc39943..ee5f38c 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -713,8 +713,7 @@
 
   // In major version 2, we don't add dummy operation to the payload.
   // If we already extracted the signature we should skip this step.
-  if (major_payload_version_ == kBrilloMajorPayloadVersion &&
-      manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
+  if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
       signatures_message_data_.empty()) {
     if (manifest_.signatures_offset() != buffer_offset_) {
       LOG(ERROR) << "Payload signatures offset points to blob offset "
@@ -749,51 +748,11 @@
 }
 
 bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
-  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
-    partitions_.clear();
-    for (const PartitionUpdate& partition : manifest_.partitions()) {
-      partitions_.push_back(partition);
-    }
-    manifest_.clear_partitions();
-  } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
-    LOG(INFO) << "Converting update information from old format.";
-    PartitionUpdate root_part;
-    root_part.set_partition_name(kPartitionNameRoot);
-#ifdef __ANDROID__
-    LOG(WARNING) << "Legacy payload major version provided to an Android "
-                    "build. Assuming no post-install. Please use major version "
-                    "2 or newer.";
-    root_part.set_run_postinstall(false);
-#else
-    root_part.set_run_postinstall(true);
-#endif  // __ANDROID__
-    if (manifest_.has_old_rootfs_info()) {
-      *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
-      manifest_.clear_old_rootfs_info();
-    }
-    if (manifest_.has_new_rootfs_info()) {
-      *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
-      manifest_.clear_new_rootfs_info();
-    }
-    *root_part.mutable_operations() = manifest_.install_operations();
-    manifest_.clear_install_operations();
-    partitions_.push_back(std::move(root_part));
-
-    PartitionUpdate kern_part;
-    kern_part.set_partition_name(kPartitionNameKernel);
-    kern_part.set_run_postinstall(false);
-    if (manifest_.has_old_kernel_info()) {
-      *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
-      manifest_.clear_old_kernel_info();
-    }
-    if (manifest_.has_new_kernel_info()) {
-      *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
-      manifest_.clear_new_kernel_info();
-    }
-    *kern_part.mutable_operations() = manifest_.kernel_install_operations();
-    manifest_.clear_kernel_install_operations();
-    partitions_.push_back(std::move(kern_part));
+  partitions_.clear();
+  for (const PartitionUpdate& partition : manifest_.partitions()) {
+    partitions_.push_back(partition);
   }
+  manifest_.clear_partitions();
 
   // Fill in the InstallPlan::partitions based on the partitions from the
   // payload.
@@ -954,14 +913,6 @@
   TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
   TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
 
-  // Extract the signature message if it's in this operation.
-  if (ExtractSignatureMessageFromOperation(operation)) {
-    // If this is dummy replace operation, we ignore it after extracting the
-    // signature.
-    DiscardBuffer(true, 0);
-    return true;
-  }
-
   // Setup the ExtentWriter stack based on the operation type.
   std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();
 
@@ -1412,19 +1363,6 @@
   return true;
 }
 
-bool DeltaPerformer::ExtractSignatureMessageFromOperation(
-    const InstallOperation& operation) {
-  if (operation.type() != InstallOperation::REPLACE ||
-      !manifest_.has_signatures_offset() ||
-      manifest_.signatures_offset() != operation.data_offset()) {
-    return false;
-  }
-  TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
-                        manifest_.signatures_size() == operation.data_length());
-  TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
-  return true;
-}
-
 bool DeltaPerformer::ExtractSignatureMessage() {
   TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
   TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
@@ -1476,11 +1414,11 @@
   // Perform assorted checks to sanity check the manifest, make sure it
   // matches data from other sources, and that it is a supported version.
 
-  bool has_old_fields =
-      (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
-  for (const PartitionUpdate& partition : manifest_.partitions()) {
-    has_old_fields = has_old_fields || partition.has_old_partition_info();
-  }
+  bool has_old_fields = std::any_of(manifest_.partitions().begin(),
+                                    manifest_.partitions().end(),
+                                    [](const PartitionUpdate& partition) {
+                                      return partition.has_old_partition_info();
+                                    });
 
   // The presence of an old partition hash is the sole indicator for a delta
   // update.
@@ -1522,16 +1460,12 @@
     }
   }
 
-  if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
-    if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() ||
-        manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() ||
-        manifest_.install_operations_size() != 0 ||
-        manifest_.kernel_install_operations_size() != 0) {
-      LOG(ERROR) << "Manifest contains deprecated field only supported in "
-                 << "major payload version 1, but the payload major version is "
-                 << major_payload_version_;
-      return ErrorCode::kPayloadMismatchedType;
-    }
+  if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() ||
+      manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() ||
+      manifest_.install_operations_size() != 0 ||
+      manifest_.kernel_install_operations_size() != 0) {
+    LOG(ERROR) << "Manifest contains deprecated fields.";
+    return ErrorCode::kPayloadMismatchedType;
   }
 
   if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
@@ -1542,18 +1476,8 @@
     return ErrorCode::kPayloadTimestampError;
   }
 
-  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
-    if (manifest_.has_dynamic_partition_metadata()) {
-      LOG(ERROR)
-          << "Should not contain dynamic_partition_metadata for major version "
-          << kChromeOSMajorPayloadVersion
-          << ". Please use major version 2 or above.";
-      return ErrorCode::kPayloadMismatchedType;
-    }
-  }
-
-  // TODO(garnold) we should be adding more and more manifest checks, such as
-  // partition boundaries etc (see chromium-os:37661).
+  // TODO(crbug.com/37661): We should add more manifest checks, such as
+  // partition boundaries.
 
   return ErrorCode::kSuccess;
 }
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 4493c2a..7860747 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -237,11 +237,6 @@
   FileDescriptorPtr ChooseSourceFD(const InstallOperation& operation,
                                    ErrorCode* error);
 
-  // Extracts the payload signature message from the blob on the |operation| if
-  // the offset matches the one specified by the manifest. Returns whether the
-  // signature was extracted.
-  bool ExtractSignatureMessageFromOperation(const InstallOperation& operation);
-
   // Extracts the payload signature message from the current |buffer_| if the
   // offset matches the one specified by the manifest. Returns whether the
   // signature was extracted.
diff --git a/payload_consumer/delta_performer_integration_test.cc b/payload_consumer/delta_performer_integration_test.cc
index 904ea5a..5f55739 100644
--- a/payload_consumer/delta_performer_integration_test.cc
+++ b/payload_consumer/delta_performer_integration_test.cc
@@ -76,6 +76,7 @@
 
   string delta_path;
   uint64_t metadata_size;
+  uint32_t metadata_signature_size;
 
   string old_kernel;
   brillo::Blob old_kernel_data;
@@ -187,17 +188,32 @@
                                  uint64_t* out_metadata_size) {
   string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
   int signature_size = GetSignatureSize(private_key_path);
-  brillo::Blob hash;
+  brillo::Blob metadata_hash, payload_hash;
   ASSERT_TRUE(PayloadSigner::HashPayloadForSigning(
-      payload_path, {signature_size}, &hash, nullptr));
-  brillo::Blob signature;
-  ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
-  ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(
-      payload_path, {signature}, {}, payload_path, out_metadata_size));
+      payload_path, {signature_size}, &payload_hash, &metadata_hash));
+  brillo::Blob metadata_signature, payload_signature;
+  ASSERT_TRUE(PayloadSigner::SignHash(
+      payload_hash, private_key_path, &payload_signature));
+  ASSERT_TRUE(PayloadSigner::SignHash(
+      metadata_hash, private_key_path, &metadata_signature));
+  ASSERT_TRUE(PayloadSigner::AddSignatureToPayload(payload_path,
+                                                   {payload_signature},
+                                                   {metadata_signature},
+                                                   payload_path,
+                                                   out_metadata_size));
   EXPECT_TRUE(PayloadSigner::VerifySignedPayload(
       payload_path, GetBuildArtifactsPath(kUnittestPublicKeyPath)));
 }
 
+static void SignHashToFile(const string& hash_file,
+                           const string& signature_file,
+                           const string& private_key_file) {
+  brillo::Blob hash, signature;
+  ASSERT_TRUE(utils::ReadFile(hash_file, &hash));
+  ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_file, &signature));
+  ASSERT_TRUE(test_utils::WriteFileVector(signature_file, signature));
+}
+
 static void SignGeneratedShellPayload(SignatureTest signature_test,
                                       const string& payload_path) {
   string private_key_path = GetBuildArtifactsPath(kUnittestPrivateKeyPath);
@@ -230,7 +246,8 @@
     RSA_free(rsa);
   }
   int signature_size = GetSignatureSize(private_key_path);
-  test_utils::ScopedTempFile hash_file("hash.XXXXXX");
+  test_utils::ScopedTempFile payload_hash_file("hash.XXXXXX"),
+      metadata_hash_file("hash.XXXXXX");
   string signature_size_string;
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2)
@@ -241,38 +258,51 @@
   string delta_generator_path = GetBuildArtifactsPath("delta_generator");
   ASSERT_EQ(0,
             System(base::StringPrintf(
-                "%s -in_file=%s -signature_size=%s -out_hash_file=%s",
+                "%s -in_file=%s -signature_size=%s -out_hash_file=%s "
+                "-out_metadata_hash_file=%s",
                 delta_generator_path.c_str(),
                 payload_path.c_str(),
                 signature_size_string.c_str(),
-                hash_file.path().c_str())));
+                payload_hash_file.path().c_str(),
+                metadata_hash_file.path().c_str())));
 
-  // Sign the hash
-  brillo::Blob hash, signature;
-  ASSERT_TRUE(utils::ReadFile(hash_file.path(), &hash));
-  ASSERT_TRUE(PayloadSigner::SignHash(hash, private_key_path, &signature));
+  // Sign the payload hash.
+  test_utils::ScopedTempFile payload_signature_file("signature.XXXXXX");
+  SignHashToFile(payload_hash_file.path(),
+                 payload_signature_file.path(),
+                 private_key_path);
+  string payload_sig_files = payload_signature_file.path();
+  // Sign the metadata hash.
+  test_utils::ScopedTempFile metadata_signature_file("signature.XXXXXX");
+  SignHashToFile(metadata_hash_file.path(),
+                 metadata_signature_file.path(),
+                 private_key_path);
+  string metadata_sig_files = metadata_signature_file.path();
 
-  test_utils::ScopedTempFile sig_file("signature.XXXXXX");
-  ASSERT_TRUE(test_utils::WriteFileVector(sig_file.path(), signature));
-  string sig_files = sig_file.path();
-
-  test_utils::ScopedTempFile sig_file2("signature.XXXXXX");
+  test_utils::ScopedTempFile payload_signature_file2("signature.XXXXXX");
+  test_utils::ScopedTempFile metadata_signature_file2("signature.XXXXXX");
   if (signature_test == kSignatureGeneratedShellRotateCl1 ||
       signature_test == kSignatureGeneratedShellRotateCl2) {
-    ASSERT_TRUE(PayloadSigner::SignHash(
-        hash, GetBuildArtifactsPath(kUnittestPrivateKey2Path), &signature));
-    ASSERT_TRUE(test_utils::WriteFileVector(sig_file2.path(), signature));
+    SignHashToFile(payload_hash_file.path(),
+                   payload_signature_file2.path(),
+                   GetBuildArtifactsPath(kUnittestPrivateKey2Path));
+    SignHashToFile(metadata_hash_file.path(),
+                   metadata_signature_file2.path(),
+                   GetBuildArtifactsPath(kUnittestPrivateKey2Path));
     // Append second sig file to first path
-    sig_files += ":" + sig_file2.path();
+    payload_sig_files += ":" + payload_signature_file2.path();
+    metadata_sig_files += ":" + metadata_signature_file2.path();
   }
 
-  ASSERT_EQ(0,
-            System(base::StringPrintf(
-                "%s -in_file=%s -payload_signature_file=%s -out_file=%s",
-                delta_generator_path.c_str(),
-                payload_path.c_str(),
-                sig_files.c_str(),
-                payload_path.c_str())));
+  ASSERT_EQ(
+      0,
+      System(base::StringPrintf("%s -in_file=%s -payload_signature_file=%s "
+                                "-metadata_signature_file=%s -out_file=%s",
+                                delta_generator_path.c_str(),
+                                payload_path.c_str(),
+                                payload_sig_files.c_str(),
+                                metadata_sig_files.c_str(),
+                                payload_path.c_str())));
   int verify_result = System(base::StringPrintf(
       "%s -in_file=%s -public_key=%s -public_key_version=%d",
       delta_generator_path.c_str(),
@@ -474,7 +504,7 @@
     payload_config.is_delta = !full_rootfs;
     payload_config.hard_chunk_size = chunk_size;
     payload_config.rootfs_partition_size = kRootFSPartitionSize;
-    payload_config.version.major = kChromeOSMajorPayloadVersion;
+    payload_config.version.major = kBrilloMajorPayloadVersion;
     payload_config.version.minor = minor_version;
     if (!full_rootfs) {
       payload_config.source.partitions.emplace_back(kPartitionNameRoot);
@@ -564,6 +594,9 @@
     EXPECT_TRUE(payload_metadata.ParsePayloadHeader(state->delta));
     state->metadata_size = payload_metadata.GetMetadataSize();
     LOG(INFO) << "Metadata size: " << state->metadata_size;
+    state->metadata_signature_size =
+        payload_metadata.GetMetadataSignatureSize();
+    LOG(INFO) << "Metadata signature size: " << state->metadata_signature_size;
 
     DeltaArchiveManifest manifest;
     EXPECT_TRUE(payload_metadata.GetManifest(state->delta, &manifest));
@@ -575,7 +608,8 @@
       EXPECT_TRUE(manifest.has_signatures_size());
       Signatures sigs_message;
       EXPECT_TRUE(sigs_message.ParseFromArray(
-          &state->delta[state->metadata_size + manifest.signatures_offset()],
+          &state->delta[state->metadata_size + state->metadata_signature_size +
+                        manifest.signatures_offset()],
           manifest.signatures_size()));
       if (signature_test == kSignatureGeneratedShellRotateCl1 ||
           signature_test == kSignatureGeneratedShellRotateCl2)
@@ -597,13 +631,38 @@
       EXPECT_FALSE(signature.data().empty());
     }
 
+    // TODO(ahassani): Make |DeltaState| into a partition list kind of struct
+    // instead of hardcoded kernel/rootfs so it's cleaner and we can make the
+    // following code into a helper function instead.
+    const auto& kernel_part = *std::find_if(
+        manifest.partitions().begin(),
+        manifest.partitions().end(),
+        [](const PartitionUpdate& partition) {
+          return partition.partition_name() == kPartitionNameKernel;
+        });
     if (full_kernel) {
-      EXPECT_FALSE(manifest.has_old_kernel_info());
+      EXPECT_FALSE(kernel_part.has_old_partition_info());
     } else {
       EXPECT_EQ(state->old_kernel_data.size(),
-                manifest.old_kernel_info().size());
-      EXPECT_FALSE(manifest.old_kernel_info().hash().empty());
+                kernel_part.old_partition_info().size());
+      EXPECT_FALSE(kernel_part.old_partition_info().hash().empty());
     }
+    EXPECT_EQ(state->new_kernel_data.size(),
+              kernel_part.new_partition_info().size());
+    EXPECT_FALSE(kernel_part.new_partition_info().hash().empty());
+
+    const auto& rootfs_part =
+        *std::find_if(manifest.partitions().begin(),
+                      manifest.partitions().end(),
+                      [](const PartitionUpdate& partition) {
+                        return partition.partition_name() == kPartitionNameRoot;
+                      });
+    if (full_rootfs) {
+      EXPECT_FALSE(rootfs_part.has_old_partition_info());
+    } else {
+      EXPECT_FALSE(rootfs_part.old_partition_info().hash().empty());
+    }
+    EXPECT_FALSE(rootfs_part.new_partition_info().hash().empty());
 
     EXPECT_EQ(manifest.new_image_info().channel(), "test-channel");
     EXPECT_EQ(manifest.new_image_info().board(), "test-board");
@@ -620,27 +679,14 @@
       EXPECT_EQ(manifest.old_image_info().build_channel(), "src-build-channel");
       EXPECT_EQ(manifest.old_image_info().build_version(), "src-build-version");
     }
-
-    if (full_rootfs) {
-      EXPECT_FALSE(manifest.has_old_rootfs_info());
-      EXPECT_FALSE(manifest.has_old_image_info());
-      EXPECT_TRUE(manifest.has_new_image_info());
-    } else {
-      EXPECT_EQ(state->image_size, manifest.old_rootfs_info().size());
-      EXPECT_FALSE(manifest.old_rootfs_info().hash().empty());
-    }
-
-    EXPECT_EQ(state->new_kernel_data.size(), manifest.new_kernel_info().size());
-    EXPECT_EQ(state->image_size, manifest.new_rootfs_info().size());
-
-    EXPECT_FALSE(manifest.new_kernel_info().hash().empty());
-    EXPECT_FALSE(manifest.new_rootfs_info().hash().empty());
   }
 
   MockPrefs prefs;
   EXPECT_CALL(prefs, SetInt64(kPrefsManifestMetadataSize, state->metadata_size))
       .WillOnce(Return(true));
-  EXPECT_CALL(prefs, SetInt64(kPrefsManifestSignatureSize, 0))
+  EXPECT_CALL(
+      prefs,
+      SetInt64(kPrefsManifestSignatureSize, state->metadata_signature_size))
       .WillOnce(Return(true));
   EXPECT_CALL(prefs, SetInt64(kPrefsUpdateStateNextOperation, _))
       .WillRepeatedly(Return(true));
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 61b58ed..0671eca 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -304,14 +304,16 @@
 
     // Set a valid magic string and version number 1.
     EXPECT_TRUE(performer_.Write("CrAU", 4));
-    uint64_t version = htobe64(kChromeOSMajorPayloadVersion);
+    uint64_t version = htobe64(kBrilloMajorPayloadVersion);
     EXPECT_TRUE(performer_.Write(&version, 8));
 
     payload_.metadata_size = expected_metadata_size;
     ErrorCode error_code;
-    // When filling in size in manifest, exclude the size of the 20-byte header.
-    uint64_t size_in_manifest = htobe64(actual_metadata_size - 20);
-    bool result = performer_.Write(&size_in_manifest, 8, &error_code);
+    // When filling in size in manifest, exclude the size of the 24-byte header.
+    uint64_t size_in_manifest = htobe64(actual_metadata_size - 24);
+    performer_.Write(&size_in_manifest, 8, &error_code);
+    uint32_t signature_size = htobe32(10);
+    bool result = performer_.Write(&signature_size, 4, &error_code);
     if (expected_metadata_size == actual_metadata_size ||
         !hash_checks_mandatory) {
       EXPECT_TRUE(result);
@@ -333,7 +335,7 @@
     brillo::Blob payload = GeneratePayload(brillo::Blob(),
                                            vector<AnnotatedOperation>(),
                                            sign_payload,
-                                           kChromeOSMajorPayloadVersion,
+                                           kBrilloMajorPayloadVersion,
                                            kFullPayloadMinorVersion);
 
     LOG(INFO) << "Payload size: " << payload.size();
@@ -347,6 +349,9 @@
     switch (metadata_signature_test) {
       case kEmptyMetadataSignature:
         payload_.metadata_signature.clear();
+        // Zero out the metadata signature size field in this signed payload.
+        std::fill(
+            std::next(payload.begin(), 20), std::next(payload.begin(), 24), 0);
         expected_result = MetadataParseResult::kError;
         expected_error = ErrorCode::kDownloadMetadataSignatureMissingError;
         break;
@@ -447,7 +452,7 @@
   brillo::Blob payload_data = GeneratePayload(expected_data,
                                               aops,
                                               false,
-                                              kChromeOSMajorPayloadVersion,
+                                              kBrilloMajorPayloadVersion,
                                               kFullPayloadMinorVersion);
 
   EXPECT_EQ(expected_data, ApplyPayload(payload_data, "/dev/null", true));
@@ -469,7 +474,7 @@
   brillo::Blob payload_data = GeneratePayload(expected_data,
                                               aops,
                                               false,
-                                              kChromeOSMajorPayloadVersion,
+                                              kBrilloMajorPayloadVersion,
                                               kFullPayloadMinorVersion);
 
   testing::Mock::VerifyAndClearExpectations(&mock_delegate_);
@@ -725,27 +730,32 @@
 TEST_F(DeltaPerformerTest, ValidateManifestFullGoodTest) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
+  for (const auto& part_name : {"kernel", "rootfs"}) {
+    auto part = manifest.add_partitions();
+    part->set_partition_name(part_name);
+    part->mutable_new_partition_info();
+  }
   manifest.set_minor_version(kFullPayloadMinorVersion);
 
   RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
+                        kBrilloMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kSuccess);
 }
 
-TEST_F(DeltaPerformerTest, ValidateManifestDeltaGoodTest) {
+TEST_F(DeltaPerformerTest, ValidateManifestDeltaMaxGoodTest) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  manifest.mutable_old_kernel_info();
-  manifest.mutable_old_rootfs_info();
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
+  for (const auto& part_name : {"kernel", "rootfs"}) {
+    auto part = manifest.add_partitions();
+    part->set_partition_name(part_name);
+    part->mutable_old_partition_info();
+    part->mutable_new_partition_info();
+  }
   manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
+                        kBrilloMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kSuccess);
 }
@@ -753,14 +763,16 @@
 TEST_F(DeltaPerformerTest, ValidateManifestDeltaMinGoodTest) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  manifest.mutable_old_kernel_info();
-  manifest.mutable_old_rootfs_info();
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
+  for (const auto& part_name : {"kernel", "rootfs"}) {
+    auto part = manifest.add_partitions();
+    part->set_partition_name(part_name);
+    part->mutable_old_partition_info();
+    part->mutable_new_partition_info();
+  }
   manifest.set_minor_version(kMinSupportedMinorPayloadVersion);
 
   RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
+                        kBrilloMajorPayloadVersion,
                         InstallPayloadType::kDelta,
                         ErrorCode::kSuccess);
 }
@@ -778,9 +790,11 @@
 TEST_F(DeltaPerformerTest, ValidateManifestDeltaUnsetMinorVersion) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  // Add an empty old_rootfs_info() to trick the DeltaPerformer into think that
-  // this is a delta payload manifest with a missing minor version.
-  manifest.mutable_old_rootfs_info();
+  // Add an empty rootfs partition info to trick the DeltaPerformer into
+  // thinking this is a delta payload manifest with a missing minor version.
+  auto rootfs = manifest.add_partitions();
+  rootfs->set_partition_name("rootfs");
+  rootfs->mutable_old_partition_info();
 
   RunManifestValidation(manifest,
                         kMaxSupportedMajorPayloadVersion,
@@ -791,27 +805,15 @@
 TEST_F(DeltaPerformerTest, ValidateManifestFullOldKernelTest) {
   // The Manifest we are validating.
   DeltaArchiveManifest manifest;
-  manifest.mutable_old_kernel_info();
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
-
+  for (const auto& part_name : {"kernel", "rootfs"}) {
+    auto part = manifest.add_partitions();
+    part->set_partition_name(part_name);
+    part->mutable_old_partition_info();
+    part->mutable_new_partition_info();
+  }
+  manifest.mutable_partitions(0)->clear_old_partition_info();
   RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
-                        InstallPayloadType::kFull,
-                        ErrorCode::kPayloadMismatchedType);
-}
-
-TEST_F(DeltaPerformerTest, ValidateManifestFullOldRootfsTest) {
-  // The Manifest we are validating.
-  DeltaArchiveManifest manifest;
-  manifest.mutable_old_rootfs_info();
-  manifest.mutable_new_kernel_info();
-  manifest.mutable_new_rootfs_info();
-  manifest.set_minor_version(kMaxSupportedMinorPayloadVersion);
-
-  RunManifestValidation(manifest,
-                        kChromeOSMajorPayloadVersion,
+                        kBrilloMajorPayloadVersion,
                         InstallPayloadType::kFull,
                         ErrorCode::kPayloadMismatchedType);
 }
@@ -836,8 +838,8 @@
 
   // Generate a bad version number.
   manifest.set_minor_version(kMaxSupportedMinorPayloadVersion + 10000);
-  // Mark the manifest as a delta payload by setting old_rootfs_info.
-  manifest.mutable_old_rootfs_info();
+  // Mark the manifest as a delta payload by setting |old_partition_info|.
+  manifest.add_partitions()->mutable_old_partition_info();
 
   RunManifestValidation(manifest,
                         kMaxSupportedMajorPayloadVersion,
diff --git a/payload_consumer/payload_constants.cc b/payload_consumer/payload_constants.cc
index 9e684d7..908a893 100644
--- a/payload_consumer/payload_constants.cc
+++ b/payload_consumer/payload_constants.cc
@@ -20,9 +20,12 @@
 
 namespace chromeos_update_engine {
 
-const uint64_t kChromeOSMajorPayloadVersion = 1;
+// const uint64_t kChromeOSMajorPayloadVersion = 1;  DEPRECATED
 const uint64_t kBrilloMajorPayloadVersion = 2;
 
+const uint64_t kMinSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion;
+const uint64_t kMaxSupportedMajorPayloadVersion = kBrilloMajorPayloadVersion;
+
 const uint32_t kFullPayloadMinorVersion = 0;
 // const uint32_t kInPlaceMinorPayloadVersion = 1;  DEPRECATED
 const uint32_t kSourceMinorPayloadVersion = 2;
@@ -34,9 +37,6 @@
 const uint32_t kMinSupportedMinorPayloadVersion = kSourceMinorPayloadVersion;
 const uint32_t kMaxSupportedMinorPayloadVersion = kVerityMinorPayloadVersion;
 
-const uint64_t kMinSupportedMajorPayloadVersion = 1;
-const uint64_t kMaxSupportedMajorPayloadVersion = 2;
-
 const uint64_t kMaxPayloadHeaderSize = 24;
 
 const char kPartitionNameKernel[] = "kernel";
diff --git a/payload_consumer/payload_constants.h b/payload_consumer/payload_constants.h
index fe823f4..888fa2a 100644
--- a/payload_consumer/payload_constants.h
+++ b/payload_consumer/payload_constants.h
@@ -26,7 +26,7 @@
 namespace chromeos_update_engine {
 
 // The major version used by Chrome OS.
-extern const uint64_t kChromeOSMajorPayloadVersion;
+// extern const uint64_t kChromeOSMajorPayloadVersion;  DEPRECATED
 
 // The major version used by Brillo.
 extern const uint64_t kBrilloMajorPayloadVersion;
diff --git a/payload_consumer/payload_metadata.cc b/payload_consumer/payload_metadata.cc
index 337edb4..4d8ee7b 100644
--- a/payload_consumer/payload_metadata.cc
+++ b/payload_consumer/payload_metadata.cc
@@ -36,34 +36,18 @@
 const uint64_t PayloadMetadata::kDeltaManifestSizeSize = 8;
 const uint64_t PayloadMetadata::kDeltaMetadataSignatureSizeSize = 4;
 
-bool PayloadMetadata::GetMetadataSignatureSizeOffset(
-    uint64_t* out_offset) const {
-  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
-    return true;
-  }
-  return false;
+uint64_t PayloadMetadata::GetMetadataSignatureSizeOffset() const {
+  return kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
 }
 
-bool PayloadMetadata::GetManifestOffset(uint64_t* out_offset) const {
-  // Actual manifest begins right after the manifest size field or
-  // metadata signature size field if major version >= 2.
-  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
-    return true;
-  }
-  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
-    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
-                  kDeltaMetadataSignatureSizeSize;
-    return true;
-  }
-  LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
-  return false;
+uint64_t PayloadMetadata::GetManifestOffset() const {
+  // Actual manifest begins right after the metadata signature size field.
+  return kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
+         kDeltaMetadataSignatureSizeSize;
 }
 
 MetadataParseResult PayloadMetadata::ParsePayloadHeader(
     const brillo::Blob& payload, ErrorCode* error) {
-  uint64_t manifest_offset;
   // Ensure we have data to cover the major payload version.
   if (payload.size() < kDeltaManifestSizeOffset)
     return MetadataParseResult::kInsufficientData;
@@ -75,6 +59,11 @@
     return MetadataParseResult::kError;
   }
 
+  uint64_t manifest_offset = GetManifestOffset();
+  // Check again with the manifest offset.
+  if (payload.size() < manifest_offset)
+    return MetadataParseResult::kInsufficientData;
+
   // Extract the payload version from the metadata.
   static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
                 "Major payload version size mismatch");
@@ -92,15 +81,6 @@
     return MetadataParseResult::kError;
   }
 
-  // Get the manifest offset now that we have payload version.
-  if (!GetManifestOffset(&manifest_offset)) {
-    *error = ErrorCode::kUnsupportedMajorPayloadVersion;
-    return MetadataParseResult::kError;
-  }
-  // Check again with the manifest offset.
-  if (payload.size() < manifest_offset)
-    return MetadataParseResult::kInsufficientData;
-
   // Next, parse the manifest size.
   static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
                 "manifest_size size mismatch");
@@ -116,26 +96,20 @@
     return MetadataParseResult::kError;
   }
 
-  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
-    // Parse the metadata signature size.
-    static_assert(
-        sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize,
-        "metadata_signature_size size mismatch");
-    uint64_t metadata_signature_size_offset;
-    if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
-      *error = ErrorCode::kError;
-      return MetadataParseResult::kError;
-    }
-    memcpy(&metadata_signature_size_,
-           &payload[metadata_signature_size_offset],
-           kDeltaMetadataSignatureSizeSize);
-    metadata_signature_size_ = be32toh(metadata_signature_size_);
+  // Parse the metadata signature size.
+  static_assert(
+      sizeof(metadata_signature_size_) == kDeltaMetadataSignatureSizeSize,
+      "metadata_signature_size size mismatch");
+  uint64_t metadata_signature_size_offset = GetMetadataSignatureSizeOffset();
+  memcpy(&metadata_signature_size_,
+         &payload[metadata_signature_size_offset],
+         kDeltaMetadataSignatureSizeSize);
+  metadata_signature_size_ = be32toh(metadata_signature_size_);
 
-    if (metadata_size_ + metadata_signature_size_ < metadata_size_) {
-      // Overflow detected.
-      *error = ErrorCode::kDownloadInvalidMetadataSize;
-      return MetadataParseResult::kError;
-    }
+  if (metadata_size_ + metadata_signature_size_ < metadata_size_) {
+    // Overflow detected.
+    *error = ErrorCode::kDownloadInvalidMetadataSize;
+    return MetadataParseResult::kError;
   }
   return MetadataParseResult::kSuccess;
 }
@@ -147,9 +121,7 @@
 
 bool PayloadMetadata::GetManifest(const brillo::Blob& payload,
                                   DeltaArchiveManifest* out_manifest) const {
-  uint64_t manifest_offset;
-  if (!GetManifestOffset(&manifest_offset))
-    return false;
+  uint64_t manifest_offset = GetManifestOffset();
   CHECK_GE(payload.size(), manifest_offset + manifest_size_);
   return out_manifest->ParseFromArray(&payload[manifest_offset],
                                       manifest_size_);
@@ -171,7 +143,7 @@
                  << metadata_signature;
       return ErrorCode::kDownloadMetadataSignatureError;
     }
-  } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
+  } else {
     metadata_signature_protobuf_blob.assign(
         payload.begin() + metadata_size_,
         payload.begin() + metadata_size_ + metadata_signature_size_);
@@ -243,8 +215,7 @@
     TEST_AND_RETURN_FALSE(GetManifest(payload, manifest));
   }
 
-  if (metadata_signatures != nullptr &&
-      GetMajorVersion() >= kBrilloMajorPayloadVersion) {
+  if (metadata_signatures != nullptr) {
     payload.clear();
     TEST_AND_RETURN_FALSE(utils::ReadFileChunk(
         payload_path, GetMetadataSize(), GetMetadataSignatureSize(), &payload));
diff --git a/payload_consumer/payload_metadata.h b/payload_consumer/payload_metadata.h
index ec8eea6..be43c41 100644
--- a/payload_consumer/payload_metadata.h
+++ b/payload_consumer/payload_metadata.h
@@ -94,14 +94,12 @@
                         Signatures* metadata_signatures);
 
  private:
-  // Set |*out_offset| to the byte offset at which the manifest protobuf begins
-  // in a payload. Return true on success, false if the offset is unknown.
-  bool GetManifestOffset(uint64_t* out_offset) const;
+  // Returns the byte offset at which the manifest protobuf begins in a payload.
+  uint64_t GetManifestOffset() const;
 
-  // Set |*out_offset| to the byte offset where the size of the metadata
-  // signature is stored in a payload. Return true on success, if this field is
-  // not present in the payload, return false.
-  bool GetMetadataSignatureSizeOffset(uint64_t* out_offset) const;
+  // Returns the byte offset where the size of the metadata signature is stored
+  // in a payload.
+  uint64_t GetMetadataSignatureSizeOffset() const;
 
   uint64_t metadata_size_{0};
   uint64_t manifest_size_{0};
diff --git a/payload_generator/ab_generator_unittest.cc b/payload_generator/ab_generator_unittest.cc
index 2f8c0c6..170e0e3 100644
--- a/payload_generator/ab_generator_unittest.cc
+++ b/payload_generator/ab_generator_unittest.cc
@@ -30,10 +30,10 @@
 #include "update_engine/common/test_utils.h"
 #include "update_engine/common/utils.h"
 #include "update_engine/payload_generator/annotated_operation.h"
-#include "update_engine/payload_generator/bzip.h"
 #include "update_engine/payload_generator/delta_diff_generator.h"
 #include "update_engine/payload_generator/extent_ranges.h"
 #include "update_engine/payload_generator/extent_utils.h"
+#include "update_engine/payload_generator/xz.h"
 
 using std::string;
 using std::vector;
@@ -48,8 +48,8 @@
   return ext.start_block() == start_block && ext.num_blocks() == num_blocks;
 }
 
-// Tests splitting of a REPLACE/REPLACE_BZ operation.
-void TestSplitReplaceOrReplaceBzOperation(InstallOperation_Type orig_type,
+// Tests splitting of a REPLACE/REPLACE_XZ operation.
+void TestSplitReplaceOrReplaceXzOperation(InstallOperation_Type orig_type,
                                           bool compressible) {
   const size_t op_ex1_start_block = 2;
   const size_t op_ex1_num_blocks = 2;
@@ -71,7 +71,7 @@
   }
   ASSERT_EQ(part_size, part_data.size());
   test_utils::ScopedTempFile part_file(
-      "SplitReplaceOrReplaceBzTest_part.XXXXXX");
+      "SplitReplaceOrReplaceXzTest_part.XXXXXX");
   ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
 
   // Create original operation and blob data.
@@ -97,7 +97,7 @@
   if (orig_type == InstallOperation::REPLACE) {
     op_blob = op_data;
   } else {
-    ASSERT_TRUE(BzipCompress(op_data, &op_blob));
+    ASSERT_TRUE(XzCompress(op_data, &op_blob));
   }
   op.set_data_offset(0);
   op.set_data_length(op_blob.size());
@@ -108,7 +108,7 @@
 
   // Create the data file.
   test_utils::ScopedTempFile data_file(
-      "SplitReplaceOrReplaceBzTest_data.XXXXXX");
+      "SplitReplaceOrReplaceXzTest_data.XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), op_blob));
   int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
   EXPECT_GE(data_fd, 0);
@@ -118,14 +118,14 @@
 
   // Split the operation.
   vector<AnnotatedOperation> result_ops;
-  PayloadVersion version(kChromeOSMajorPayloadVersion,
+  PayloadVersion version(kBrilloMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   ASSERT_TRUE(ABGenerator::SplitAReplaceOp(
       version, aop, part_file.path(), &result_ops, &blob_file));
 
   // Check the result.
   InstallOperation_Type expected_type =
-      compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE;
+      compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
 
   ASSERT_EQ(2U, result_ops.size());
 
@@ -143,7 +143,7 @@
       part_data.begin() + op_ex1_offset + op_ex1_size);
   brillo::Blob first_expected_blob;
   if (compressible) {
-    ASSERT_TRUE(BzipCompress(first_expected_data, &first_expected_blob));
+    ASSERT_TRUE(XzCompress(first_expected_data, &first_expected_blob));
   } else {
     first_expected_blob = first_expected_data;
   }
@@ -173,7 +173,7 @@
       part_data.begin() + op_ex2_offset + op_ex2_size);
   brillo::Blob second_expected_blob;
   if (compressible) {
-    ASSERT_TRUE(BzipCompress(second_expected_data, &second_expected_blob));
+    ASSERT_TRUE(XzCompress(second_expected_data, &second_expected_blob));
   } else {
     second_expected_blob = second_expected_data;
   }
@@ -199,8 +199,8 @@
   }
 }
 
-// Tests merging of REPLACE/REPLACE_BZ operations.
-void TestMergeReplaceOrReplaceBzOperations(InstallOperation_Type orig_type,
+// Tests merging of REPLACE/REPLACE_XZ operations.
+void TestMergeReplaceOrReplaceXzOperations(InstallOperation_Type orig_type,
                                            bool compressible) {
   const size_t first_op_num_blocks = 1;
   const size_t second_op_num_blocks = 2;
@@ -221,7 +221,7 @@
   }
   ASSERT_EQ(part_size, part_data.size());
   test_utils::ScopedTempFile part_file(
-      "MergeReplaceOrReplaceBzTest_part.XXXXXX");
+      "MergeReplaceOrReplaceXzTest_part.XXXXXX");
   ASSERT_TRUE(test_utils::WriteFileVector(part_file.path(), part_data));
 
   // Create original operations and blob data.
@@ -239,7 +239,7 @@
   if (orig_type == InstallOperation::REPLACE) {
     first_op_blob = first_op_data;
   } else {
-    ASSERT_TRUE(BzipCompress(first_op_data, &first_op_blob));
+    ASSERT_TRUE(XzCompress(first_op_data, &first_op_blob));
   }
   first_op.set_data_offset(0);
   first_op.set_data_length(first_op_blob.size());
@@ -259,7 +259,7 @@
   if (orig_type == InstallOperation::REPLACE) {
     second_op_blob = second_op_data;
   } else {
-    ASSERT_TRUE(BzipCompress(second_op_data, &second_op_blob));
+    ASSERT_TRUE(XzCompress(second_op_data, &second_op_blob));
   }
   second_op.set_data_offset(first_op_blob.size());
   second_op.set_data_length(second_op_blob.size());
@@ -272,7 +272,7 @@
 
   // Create the data file.
   test_utils::ScopedTempFile data_file(
-      "MergeReplaceOrReplaceBzTest_data.XXXXXX");
+      "MergeReplaceOrReplaceXzTest_data.XXXXXX");
   EXPECT_TRUE(test_utils::WriteFileVector(data_file.path(), blob_data));
   int data_fd = open(data_file.path().c_str(), O_RDWR, 000);
   EXPECT_GE(data_fd, 0);
@@ -281,14 +281,14 @@
   BlobFileWriter blob_file(data_fd, &data_file_size);
 
   // Merge the operations.
-  PayloadVersion version(kChromeOSMajorPayloadVersion,
+  PayloadVersion version(kBrilloMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   EXPECT_TRUE(ABGenerator::MergeOperations(
       &aops, version, 5, part_file.path(), &blob_file));
 
   // Check the result.
   InstallOperation_Type expected_op_type =
-      compressible ? InstallOperation::REPLACE_BZ : InstallOperation::REPLACE;
+      compressible ? InstallOperation::REPLACE_XZ : InstallOperation::REPLACE;
   EXPECT_EQ(1U, aops.size());
   InstallOperation new_op = aops[0].op;
   EXPECT_EQ(expected_op_type, new_op.type());
@@ -303,7 +303,7 @@
                              part_data.begin() + total_op_size);
   brillo::Blob expected_blob;
   if (compressible) {
-    ASSERT_TRUE(BzipCompress(expected_data, &expected_blob));
+    ASSERT_TRUE(XzCompress(expected_data, &expected_blob));
   } else {
     expected_blob = expected_data;
   }
@@ -384,19 +384,19 @@
 }
 
 TEST_F(ABGeneratorTest, SplitReplaceTest) {
-  TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE, false);
+  TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE, false);
 }
 
-TEST_F(ABGeneratorTest, SplitReplaceIntoReplaceBzTest) {
-  TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE, true);
+TEST_F(ABGeneratorTest, SplitReplaceIntoReplaceXzTest) {
+  TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE, true);
 }
 
-TEST_F(ABGeneratorTest, SplitReplaceBzTest) {
-  TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE_BZ, true);
+TEST_F(ABGeneratorTest, SplitReplaceXzTest) {
+  TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE_XZ, true);
 }
 
-TEST_F(ABGeneratorTest, SplitReplaceBzIntoReplaceTest) {
-  TestSplitReplaceOrReplaceBzOperation(InstallOperation::REPLACE_BZ, false);
+TEST_F(ABGeneratorTest, SplitReplaceXzIntoReplaceTest) {
+  TestSplitReplaceOrReplaceXzOperation(InstallOperation::REPLACE_XZ, false);
 }
 
 TEST_F(ABGeneratorTest, SortOperationsByDestinationTest) {
@@ -464,7 +464,7 @@
   aops.push_back(third_aop);
 
   BlobFileWriter blob_file(0, nullptr);
-  PayloadVersion version(kChromeOSMajorPayloadVersion,
+  PayloadVersion version(kBrilloMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 5, "", &blob_file));
 
@@ -484,19 +484,19 @@
 }
 
 TEST_F(ABGeneratorTest, MergeReplaceOperationsTest) {
-  TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE, false);
+  TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE, false);
 }
 
-TEST_F(ABGeneratorTest, MergeReplaceOperationsToReplaceBzTest) {
-  TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE, true);
+TEST_F(ABGeneratorTest, MergeReplaceOperationsToReplaceXzTest) {
+  TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE, true);
 }
 
-TEST_F(ABGeneratorTest, MergeReplaceBzOperationsTest) {
-  TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE_BZ, true);
+TEST_F(ABGeneratorTest, MergeReplaceXzOperationsTest) {
+  TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE_XZ, true);
 }
 
-TEST_F(ABGeneratorTest, MergeReplaceBzOperationsToReplaceTest) {
-  TestMergeReplaceOrReplaceBzOperations(InstallOperation::REPLACE_BZ, false);
+TEST_F(ABGeneratorTest, MergeReplaceXzOperationsToReplaceTest) {
+  TestMergeReplaceOrReplaceXzOperations(InstallOperation::REPLACE_XZ, false);
 }
 
 TEST_F(ABGeneratorTest, NoMergeOperationsTest) {
@@ -537,7 +537,7 @@
   aops.push_back(fourth_aop);
 
   BlobFileWriter blob_file(0, nullptr);
-  PayloadVersion version(kChromeOSMajorPayloadVersion,
+  PayloadVersion version(kBrilloMajorPayloadVersion,
                          kSourceMinorPayloadVersion);
   EXPECT_TRUE(ABGenerator::MergeOperations(&aops, version, 4, "", &blob_file));
 
diff --git a/payload_generator/delta_diff_utils_unittest.cc b/payload_generator/delta_diff_utils_unittest.cc
index e25c867..bc3dca5 100644
--- a/payload_generator/delta_diff_utils_unittest.cc
+++ b/payload_generator/delta_diff_utils_unittest.cc
@@ -136,7 +136,7 @@
   bool RunDeltaMovedAndZeroBlocks(ssize_t chunk_blocks,
                                   uint32_t minor_version) {
     BlobFileWriter blob_file(blob_fd_, &blob_size_);
-    PayloadVersion version(kChromeOSMajorPayloadVersion, minor_version);
+    PayloadVersion version(kBrilloMajorPayloadVersion, minor_version);
     ExtentRanges old_zero_blocks;
     return diff_utils::DeltaMovedAndZeroBlocks(&aops_,
                                                old_part_.path,
@@ -225,8 +225,7 @@
         new_extents,
         {},  // old_deflates
         {},  // new_deflates
-        PayloadVersion(kChromeOSMajorPayloadVersion,
-                       kSourceMinorPayloadVersion),
+        PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
         &data,
         &op));
     EXPECT_FALSE(data.empty());
@@ -268,7 +267,7 @@
       new_extents,
       {},  // old_deflates
       {},  // new_deflates
-      PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
+      PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
       &data,
       &op));
   EXPECT_TRUE(data.empty());
@@ -302,7 +301,7 @@
       new_extents,
       {},  // old_deflates
       {},  // new_deflates
-      PayloadVersion(kChromeOSMajorPayloadVersion, kSourceMinorPayloadVersion),
+      PayloadVersion(kBrilloMajorPayloadVersion, kSourceMinorPayloadVersion),
       &data,
       &op));
 
diff --git a/payload_generator/full_update_generator_unittest.cc b/payload_generator/full_update_generator_unittest.cc
index e398125..5f39e8b 100644
--- a/payload_generator/full_update_generator_unittest.cc
+++ b/payload_generator/full_update_generator_unittest.cc
@@ -90,7 +90,7 @@
     EXPECT_EQ(config_.hard_chunk_size / config_.block_size,
               aops[i].op.dst_extents(0).num_blocks());
     if (aops[i].op.type() != InstallOperation::REPLACE) {
-      EXPECT_EQ(InstallOperation::REPLACE_BZ, aops[i].op.type());
+      EXPECT_EQ(InstallOperation::REPLACE_XZ, aops[i].op.type());
     }
   }
 }
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index 16f360f..69ac8bb 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -509,16 +509,10 @@
   partition_names = base::SplitString(
       FLAGS_partition_names, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
   CHECK(!partition_names.empty());
-  if (FLAGS_major_version == kChromeOSMajorPayloadVersion ||
-      FLAGS_new_partitions.empty()) {
-    LOG_IF(FATAL, partition_names.size() != 2)
-        << "To support more than 2 partitions, please use the "
-        << "--new_partitions flag and major version 2.";
-    LOG_IF(FATAL,
-           partition_names[0] != kPartitionNameRoot ||
-               partition_names[1] != kPartitionNameKernel)
-        << "To support non-default partition name, please use the "
-        << "--new_partitions flag and major version 2.";
+  if (FLAGS_major_version < kMinSupportedMajorPayloadVersion ||
+      FLAGS_major_version > kMaxSupportedMajorPayloadVersion) {
+    LOG(FATAL) << "Unsupported major version " << FLAGS_major_version;
+    return 1;
   }
 
   if (!FLAGS_new_partitions.empty()) {
@@ -577,8 +571,6 @@
   }
 
   if (!FLAGS_new_postinstall_config_file.empty()) {
-    LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
-        << "Postinstall config is only allowed in major version 2 or newer.";
     brillo::KeyValueStore store;
     CHECK(store.Load(base::FilePath(FLAGS_new_postinstall_config_file)));
     CHECK(payload_config.target.LoadPostInstallConfig(store));
@@ -596,9 +588,6 @@
   CHECK(payload_config.target.LoadImageSize());
 
   if (!FLAGS_dynamic_partition_info_file.empty()) {
-    LOG_IF(FATAL, FLAGS_major_version == kChromeOSMajorPayloadVersion)
-        << "Dynamic partition info is only allowed in major version 2 or "
-           "newer.";
     brillo::KeyValueStore store;
     CHECK(store.Load(base::FilePath(FLAGS_dynamic_partition_info_file)));
     CHECK(payload_config.target.LoadDynamicPartitionMetadata(store));
diff --git a/payload_generator/payload_file.cc b/payload_generator/payload_file.cc
index 775a509..b55d03c 100644
--- a/payload_generator/payload_file.cc
+++ b/payload_generator/payload_file.cc
@@ -74,11 +74,9 @@
   manifest_.set_block_size(config.block_size);
   manifest_.set_max_timestamp(config.max_timestamp);
 
-  if (major_version_ == kBrilloMajorPayloadVersion) {
-    if (config.target.dynamic_partition_metadata != nullptr)
-      *(manifest_.mutable_dynamic_partition_metadata()) =
-          *(config.target.dynamic_partition_metadata);
-  }
+  if (config.target.dynamic_partition_metadata != nullptr)
+    *(manifest_.mutable_dynamic_partition_metadata()) =
+        *(config.target.dynamic_partition_metadata);
 
   return true;
 }
@@ -86,13 +84,6 @@
 bool PayloadFile::AddPartition(const PartitionConfig& old_conf,
                                const PartitionConfig& new_conf,
                                const vector<AnnotatedOperation>& aops) {
-  // Check partitions order for Chrome OS
-  if (major_version_ == kChromeOSMajorPayloadVersion) {
-    const vector<const char*> part_order = {kPartitionNameRoot,
-                                            kPartitionNameKernel};
-    TEST_AND_RETURN_FALSE(part_vec_.size() < part_order.size());
-    TEST_AND_RETURN_FALSE(new_conf.name == part_order[part_vec_.size()]);
-  }
   Partition part;
   part.name = new_conf.name;
   part.aops = aops;
@@ -134,66 +125,45 @@
   }
 
   // Copy the operations and partition info from the part_vec_ to the manifest.
-  manifest_.clear_install_operations();
-  manifest_.clear_kernel_install_operations();
   manifest_.clear_partitions();
   for (const auto& part : part_vec_) {
-    if (major_version_ == kBrilloMajorPayloadVersion) {
-      PartitionUpdate* partition = manifest_.add_partitions();
-      partition->set_partition_name(part.name);
-      if (part.postinstall.run) {
-        partition->set_run_postinstall(true);
-        if (!part.postinstall.path.empty())
-          partition->set_postinstall_path(part.postinstall.path);
-        if (!part.postinstall.filesystem_type.empty())
-          partition->set_filesystem_type(part.postinstall.filesystem_type);
-        partition->set_postinstall_optional(part.postinstall.optional);
+    PartitionUpdate* partition = manifest_.add_partitions();
+    partition->set_partition_name(part.name);
+    if (part.postinstall.run) {
+      partition->set_run_postinstall(true);
+      if (!part.postinstall.path.empty())
+        partition->set_postinstall_path(part.postinstall.path);
+      if (!part.postinstall.filesystem_type.empty())
+        partition->set_filesystem_type(part.postinstall.filesystem_type);
+      partition->set_postinstall_optional(part.postinstall.optional);
+    }
+    if (!part.verity.IsEmpty()) {
+      if (part.verity.hash_tree_extent.num_blocks() != 0) {
+        *partition->mutable_hash_tree_data_extent() =
+            part.verity.hash_tree_data_extent;
+        *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
+        partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
+        if (!part.verity.hash_tree_salt.empty())
+          partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
+                                        part.verity.hash_tree_salt.size());
       }
-      if (!part.verity.IsEmpty()) {
-        if (part.verity.hash_tree_extent.num_blocks() != 0) {
-          *partition->mutable_hash_tree_data_extent() =
-              part.verity.hash_tree_data_extent;
-          *partition->mutable_hash_tree_extent() = part.verity.hash_tree_extent;
-          partition->set_hash_tree_algorithm(part.verity.hash_tree_algorithm);
-          if (!part.verity.hash_tree_salt.empty())
-            partition->set_hash_tree_salt(part.verity.hash_tree_salt.data(),
-                                          part.verity.hash_tree_salt.size());
-        }
-        if (part.verity.fec_extent.num_blocks() != 0) {
-          *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
-          *partition->mutable_fec_extent() = part.verity.fec_extent;
-          partition->set_fec_roots(part.verity.fec_roots);
-        }
-      }
-      for (const AnnotatedOperation& aop : part.aops) {
-        *partition->add_operations() = aop.op;
-      }
-      if (part.old_info.has_size() || part.old_info.has_hash())
-        *(partition->mutable_old_partition_info()) = part.old_info;
-      if (part.new_info.has_size() || part.new_info.has_hash())
-        *(partition->mutable_new_partition_info()) = part.new_info;
-    } else {
-      // major_version_ == kChromeOSMajorPayloadVersion
-      if (part.name == kPartitionNameKernel) {
-        for (const AnnotatedOperation& aop : part.aops)
-          *manifest_.add_kernel_install_operations() = aop.op;
-        if (part.old_info.has_size() || part.old_info.has_hash())
-          *manifest_.mutable_old_kernel_info() = part.old_info;
-        if (part.new_info.has_size() || part.new_info.has_hash())
-          *manifest_.mutable_new_kernel_info() = part.new_info;
-      } else {
-        for (const AnnotatedOperation& aop : part.aops)
-          *manifest_.add_install_operations() = aop.op;
-        if (part.old_info.has_size() || part.old_info.has_hash())
-          *manifest_.mutable_old_rootfs_info() = part.old_info;
-        if (part.new_info.has_size() || part.new_info.has_hash())
-          *manifest_.mutable_new_rootfs_info() = part.new_info;
+      if (part.verity.fec_extent.num_blocks() != 0) {
+        *partition->mutable_fec_data_extent() = part.verity.fec_data_extent;
+        *partition->mutable_fec_extent() = part.verity.fec_extent;
+        partition->set_fec_roots(part.verity.fec_roots);
       }
     }
+    for (const AnnotatedOperation& aop : part.aops) {
+      *partition->add_operations() = aop.op;
+    }
+    if (part.old_info.has_size() || part.old_info.has_hash())
+      *(partition->mutable_old_partition_info()) = part.old_info;
+    if (part.new_info.has_size() || part.new_info.has_hash())
+      *(partition->mutable_new_partition_info()) = part.new_info;
   }
 
   // Signatures appear at the end of the blobs. Note the offset in the
-  // manifest_.
+  // |manifest_|.
   uint64_t signature_blob_length = 0;
   if (!private_key_path.empty()) {
     TEST_AND_RETURN_FALSE(PayloadSigner::SignatureBlobLength(
@@ -201,7 +171,6 @@
     PayloadSigner::AddSignatureToManifest(
         next_blob_offset,
         signature_blob_length,
-        major_version_ == kChromeOSMajorPayloadVersion,
         &manifest_);
   }
 
@@ -229,18 +198,14 @@
   TEST_AND_RETURN_FALSE(
       WriteUint64AsBigEndian(&writer, serialized_manifest.size()));
 
-  // Write metadata signature size.
-  uint32_t metadata_signature_size = 0;
-  if (major_version_ == kBrilloMajorPayloadVersion) {
-    // Metadata signature has the same size as payload signature, because they
-    // are both the same kind of signature for the same kind of hash.
-    uint32_t metadata_signature_size = htobe32(signature_blob_length);
-    TEST_AND_RETURN_FALSE_ERRNO(writer.Write(&metadata_signature_size,
-                                             sizeof(metadata_signature_size)));
-    metadata_size += sizeof(metadata_signature_size);
-    // Set correct size instead of big endian size.
-    metadata_signature_size = signature_blob_length;
-  }
+  // The metadata signature has the same size as the payload signature because
+  // they are both the same kind of signature for the same kind of hash.
+  uint32_t metadata_signature_size = htobe32(signature_blob_length);
+  TEST_AND_RETURN_FALSE_ERRNO(
+      writer.Write(&metadata_signature_size, sizeof(metadata_signature_size)));
+  metadata_size += sizeof(metadata_signature_size);
+  // Restore the host-order value; the big-endian form was only for writing.
+  metadata_signature_size = signature_blob_length;
 
   // Write protobuf
   LOG(INFO) << "Writing final delta file protobuf... "
@@ -249,8 +214,7 @@
       writer.Write(serialized_manifest.data(), serialized_manifest.size()));
 
   // Write metadata signature blob.
-  if (major_version_ == kBrilloMajorPayloadVersion &&
-      !private_key_path.empty()) {
+  if (!private_key_path.empty()) {
     brillo::Blob metadata_hash, metadata_signature;
     TEST_AND_RETURN_FALSE(HashCalculator::RawHashOfFile(
         payload_file, metadata_size, &metadata_hash));
@@ -262,7 +226,7 @@
         writer.Write(metadata_signature.data(), metadata_signature.size()));
   }
 
-  // Append the data blobs
+  // Append the data blobs.
   LOG(INFO) << "Writing final delta file data blobs...";
   int blobs_fd = open(ordered_blobs_path.c_str(), O_RDONLY, 0);
   ScopedFdCloser blobs_fd_closer(&blobs_fd);
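
For reference, the payload this writer produces always has the fixed major
version 2 layout: magic, a 64-bit big-endian version, a 64-bit big-endian
manifest length, a 32-bit big-endian metadata signature length, then the
manifest, the metadata signature, and the data blobs. A minimal parsing sketch
(illustrative only, not part of this change; it assumes the well-known 'CrAU'
magic and the 24-byte header also reflected in FakeHeader.size below):

  import struct

  def parse_payload_header(blob):
      # Bytes 0-3: magic, 4-11: major version, 12-19: manifest size (big endian).
      magic, major_version, manifest_size = struct.unpack('>4sQQ', blob[:20])
      if magic != b'CrAU':
          raise ValueError('not an update payload')
      if major_version != 2:
          raise ValueError('only major version 2 payloads are supported now')
      # Major version 2 adds a 4-byte metadata signature size, which is why the
      # header is 24 bytes rather than the old 20.
      (metadata_signature_size,) = struct.unpack('>I', blob[20:24])
      return manifest_size, metadata_signature_size
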
diff --git a/payload_generator/payload_generation_config.cc b/payload_generator/payload_generation_config.cc
index 3b791c8..e1f700a 100644
--- a/payload_generator/payload_generation_config.cc
+++ b/payload_generator/payload_generation_config.cc
@@ -219,8 +219,7 @@
 }
 
 bool PayloadVersion::Validate() const {
-  TEST_AND_RETURN_FALSE(major == kChromeOSMajorPayloadVersion ||
-                        major == kBrilloMajorPayloadVersion);
+  TEST_AND_RETURN_FALSE(major == kBrilloMajorPayloadVersion);
   TEST_AND_RETURN_FALSE(minor == kFullPayloadMinorVersion ||
                         minor == kSourceMinorPayloadVersion ||
                         minor == kOpSrcHashMinorPayloadVersion ||
@@ -236,13 +235,10 @@
     case InstallOperation::REPLACE:
     case InstallOperation::REPLACE_BZ:
       // These operations were included in the original payload format.
-      return true;
-
     case InstallOperation::REPLACE_XZ:
-      // These operations are included in the major version used in Brillo, but
-      // can also be used with minor version 3 or newer.
-      return major == kBrilloMajorPayloadVersion ||
-             minor >= kOpSrcHashMinorPayloadVersion;
+      // These operations are included in minor version 3 or newer, and in
+      // full payloads.
+      return true;
 
     case InstallOperation::ZERO:
     case InstallOperation::DISCARD:
@@ -298,8 +294,6 @@
   for (const PartitionConfig& part : target.partitions) {
     TEST_AND_RETURN_FALSE(part.ValidateExists());
     TEST_AND_RETURN_FALSE(part.size % block_size == 0);
-    if (version.major == kChromeOSMajorPayloadVersion)
-      TEST_AND_RETURN_FALSE(part.postinstall.IsEmpty());
     if (version.minor < kVerityMinorPayloadVersion)
       TEST_AND_RETURN_FALSE(part.verity.IsEmpty());
   }
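
The simplified checks above reduce to: the major version must be 2, and the
remaining per-partition restrictions depend only on the minor version. A rough
Python equivalent of the partition loop (illustrative sketch only; the
attribute names mirror the C++ config objects and are assumptions, and the
verity threshold is passed in rather than hard-coded):

  def validate_target_partitions(partitions, block_size, minor_version,
                                 verity_minor_version):
      # Every target partition must be block aligned; verity data is only
      # allowed once the minor version supports it. The old major-version-1
      # "no postinstall" restriction is gone.
      for part in partitions:
          if part.size % block_size != 0:
              return False
          if minor_version < verity_minor_version and not part.verity.IsEmpty():
              return False
      return True
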
diff --git a/payload_generator/payload_properties.cc b/payload_generator/payload_properties.cc
index 53e69f3..bc82eb7 100644
--- a/payload_generator/payload_properties.cc
+++ b/payload_generator/payload_properties.cc
@@ -119,8 +119,7 @@
     metadata_signatures_ = base::JoinString(base64_signatures, ":");
   }
 
-  is_delta_ = manifest.has_old_image_info() || manifest.has_old_kernel_info() ||
-              manifest.has_old_rootfs_info() ||
+  is_delta_ = manifest.has_old_image_info() ||
               std::any_of(manifest.partitions().begin(),
                           manifest.partitions().end(),
                           [](const PartitionUpdate& part) {
diff --git a/payload_generator/payload_signer.cc b/payload_generator/payload_signer.cc
index 2a7021f..420329f 100644
--- a/payload_generator/payload_signer.cc
+++ b/payload_generator/payload_signer.cc
@@ -98,23 +98,20 @@
   uint64_t metadata_size = payload_metadata.GetMetadataSize();
   uint32_t metadata_signature_size =
       payload_metadata.GetMetadataSignatureSize();
-  if (payload_metadata.GetMajorVersion() == kBrilloMajorPayloadVersion) {
-    // Write metadata signature size in header.
-    uint32_t metadata_signature_size_be =
-        htobe32(metadata_signature_blob.size());
-    memcpy(payload.data() + manifest_offset,
-           &metadata_signature_size_be,
-           sizeof(metadata_signature_size_be));
-    manifest_offset += sizeof(metadata_signature_size_be);
-    // Replace metadata signature.
-    payload.erase(payload.begin() + metadata_size,
-                  payload.begin() + metadata_size + metadata_signature_size);
-    payload.insert(payload.begin() + metadata_size,
-                   metadata_signature_blob.begin(),
-                   metadata_signature_blob.end());
-    metadata_signature_size = metadata_signature_blob.size();
-    LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
-  }
+  // Write metadata signature size in header.
+  uint32_t metadata_signature_size_be = htobe32(metadata_signature_blob.size());
+  memcpy(payload.data() + manifest_offset,
+         &metadata_signature_size_be,
+         sizeof(metadata_signature_size_be));
+  manifest_offset += sizeof(metadata_signature_size_be);
+  // Replace metadata signature.
+  payload.erase(payload.begin() + metadata_size,
+                payload.begin() + metadata_size + metadata_signature_size);
+  payload.insert(payload.begin() + metadata_size,
+                 metadata_signature_blob.begin(),
+                 metadata_signature_blob.end());
+  metadata_signature_size = metadata_signature_blob.size();
+  LOG(INFO) << "Metadata signature size: " << metadata_signature_size;
 
   DeltaArchiveManifest manifest;
   TEST_AND_RETURN_FALSE(payload_metadata.GetManifest(payload, &manifest));
@@ -138,7 +135,6 @@
     PayloadSigner::AddSignatureToManifest(
         payload.size() - metadata_size - metadata_signature_size,
         signature_blob.size(),
-        payload_metadata.GetMajorVersion() == kChromeOSMajorPayloadVersion,
         &manifest);
 
     // Updates the payload to include the new manifest.
@@ -209,25 +205,12 @@
 
 void PayloadSigner::AddSignatureToManifest(uint64_t signature_blob_offset,
                                            uint64_t signature_blob_length,
-                                           bool add_dummy_op,
                                            DeltaArchiveManifest* manifest) {
   LOG(INFO) << "Making room for signature in file";
   manifest->set_signatures_offset(signature_blob_offset);
   LOG(INFO) << "set? " << manifest->has_signatures_offset();
   manifest->set_signatures_offset(signature_blob_offset);
   manifest->set_signatures_size(signature_blob_length);
-  // Add a dummy op at the end to appease older clients
-  if (add_dummy_op) {
-    InstallOperation* dummy_op = manifest->add_kernel_install_operations();
-    dummy_op->set_type(InstallOperation::REPLACE);
-    dummy_op->set_data_offset(signature_blob_offset);
-    dummy_op->set_data_length(signature_blob_length);
-    Extent* dummy_extent = dummy_op->add_dst_extents();
-    // Tell the dummy op to write this data to a big sparse hole
-    dummy_extent->set_start_block(kSparseHole);
-    dummy_extent->set_num_blocks(
-        utils::DivRoundUp(signature_blob_length, kBlockSize));
-  }
 }
 
 bool PayloadSigner::VerifySignedPayload(const string& payload_path,
diff --git a/payload_generator/payload_signer.h b/payload_generator/payload_signer.h
index 83ddadc..71f4983 100644
--- a/payload_generator/payload_signer.h
+++ b/payload_generator/payload_signer.h
@@ -39,12 +39,9 @@
   static bool VerifySignedPayload(const std::string& payload_path,
                                   const std::string& public_key_path);
 
-  // Adds specified signature offset/length to given |manifest|, also adds a
-  // dummy operation that points to a signature blob located at the specified
-  // offset/length if |add_dummy_op| is true.
+  // Adds specified signature offset/length to given |manifest|.
   static void AddSignatureToManifest(uint64_t signature_blob_offset,
                                      uint64_t signature_blob_length,
-                                     bool add_dummy_op,
                                      DeltaArchiveManifest* manifest);
 
   // Given a raw |hash| and a private key in |private_key_path| calculates the
diff --git a/scripts/payload_info.py b/scripts/payload_info.py
index d10cb24..bb7f8a4 100755
--- a/scripts/payload_info.py
+++ b/scripts/payload_info.py
@@ -27,7 +27,6 @@
 
 import update_payload
 
-MAJOR_PAYLOAD_VERSION_CHROMEOS = 1
 MAJOR_PAYLOAD_VERSION_BRILLO = 2
 
 def DisplayValue(key, value):
@@ -69,15 +68,11 @@
   def _DisplayManifest(self):
     """Show information from the payload manifest."""
     manifest = self.payload.manifest
-    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
-      DisplayValue('Number of partitions', len(manifest.partitions))
-      for partition in manifest.partitions:
-        DisplayValue('  Number of "%s" ops' % partition.partition_name,
-                     len(partition.operations))
-    else:
-      DisplayValue('Number of operations', len(manifest.install_operations))
-      DisplayValue('Number of kernel ops',
-                   len(manifest.kernel_install_operations))
+    DisplayValue('Number of partitions', len(manifest.partitions))
+    for partition in manifest.partitions:
+      DisplayValue('  Number of "%s" ops' % partition.partition_name,
+                   len(partition.operations))
+
     DisplayValue('Block size', manifest.block_size)
     DisplayValue('Minor version', manifest.minor_version)
 
@@ -131,8 +126,8 @@
 
     Args:
       name: The name you want displayed above the operation table.
-      operations: The install_operations object that you want to display
-                  information about.
+      operations: The operations object that you want to display information
+                  about.
     """
     def _DisplayExtents(extents, name):
       """Show information about extents."""
@@ -170,14 +165,9 @@
     read_blocks = 0
     written_blocks = 0
     num_write_seeks = 0
-    if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
-      partitions_operations = [part.operations for part in manifest.partitions]
-    else:
-      partitions_operations = [manifest.install_operations,
-                               manifest.kernel_install_operations]
-    for operations in partitions_operations:
+    for partition in manifest.partitions:
       last_ext = None
-      for curr_op in operations:
+      for curr_op in partition.operations:
         read_blocks += sum([ext.num_blocks for ext in curr_op.src_extents])
         written_blocks += sum([ext.num_blocks for ext in curr_op.dst_extents])
         for curr_ext in curr_op.dst_extents:
@@ -187,11 +177,10 @@
             num_write_seeks += 1
           last_ext = curr_ext
 
-    # Old and new rootfs and kernel are read once during verification
-    read_blocks += manifest.old_rootfs_info.size / manifest.block_size
-    read_blocks += manifest.old_kernel_info.size / manifest.block_size
-    read_blocks += manifest.new_rootfs_info.size / manifest.block_size
-    read_blocks += manifest.new_kernel_info.size / manifest.block_size
+      # Old and new partitions are read once during verification.
+      read_blocks += partition.old_partition_info.size / manifest.block_size
+      read_blocks += partition.new_partition_info.size / manifest.block_size
+
     stats = {'read_blocks': read_blocks,
              'written_blocks': written_blocks,
              'num_write_seeks': num_write_seeks}
@@ -215,15 +204,9 @@
       self._DisplayStats(self.payload.manifest)
     if self.options.list_ops:
       print()
-      if self.payload.header.version == MAJOR_PAYLOAD_VERSION_BRILLO:
-        for partition in self.payload.manifest.partitions:
-          self._DisplayOps('%s install operations' % partition.partition_name,
-                           partition.operations)
-      else:
-        self._DisplayOps('Install operations',
-                         self.payload.manifest.install_operations)
-        self._DisplayOps('Kernel install operations',
-                         self.payload.manifest.kernel_install_operations)
+      for partition in self.payload.manifest.partitions:
+        self._DisplayOps('%s install operations' % partition.partition_name,
+                         partition.operations)
 
 
 def main():
diff --git a/scripts/payload_info_unittest.py b/scripts/payload_info_unittest.py
index a4ee9d5..bf9f60a 100755
--- a/scripts/payload_info_unittest.py
+++ b/scripts/payload_info_unittest.py
@@ -20,16 +20,16 @@
 from __future__ import print_function
 
 import StringIO
-import collections
-import mock
 import sys
 import unittest
 
+from contextlib import contextmanager
+
+import mock  # pylint: disable=import-error
+
 import payload_info
 import update_payload
 
-from contextlib import contextmanager
-
 from update_payload import update_metadata_pb2
 
 class FakePayloadError(Exception):
@@ -60,42 +60,47 @@
   def HasField(self, field):
     return hasattr(self, field)
 
+class FakeExtent(object):
+  """Fake Extent for testing."""
+  def __init__(self, start_block, num_blocks):
+    self.start_block = start_block
+    self.num_blocks = num_blocks
+
+class FakePartitionInfo(object):
+  """Fake PartitionInfo for testing."""
+  def __init__(self, size):
+    self.size = size
+
 class FakePartition(object):
   """Fake PartitionUpdate field for testing."""
 
-  def __init__(self, partition_name, operations):
+  def __init__(self, partition_name, operations, old_size, new_size):
     self.partition_name = partition_name
     self.operations = operations
+    self.old_partition_info = FakePartitionInfo(old_size)
+    self.new_partition_info = FakePartitionInfo(new_size)
 
 class FakeManifest(object):
   """Fake manifest for testing."""
 
-  def __init__(self, major_version):
-    FakeExtent = collections.namedtuple('FakeExtent',
-                                        ['start_block', 'num_blocks'])
-    self.install_operations = [FakeOp([],
-                                      [FakeExtent(1, 1), FakeExtent(2, 2)],
-                                      update_payload.common.OpType.REPLACE_BZ,
-                                      dst_length=3*4096,
-                                      data_offset=1,
-                                      data_length=1)]
-    self.kernel_install_operations = [FakeOp(
-        [FakeExtent(1, 1)],
-        [FakeExtent(x, x) for x in xrange(20)],
-        update_payload.common.OpType.SOURCE_COPY,
-        src_length=4096)]
-    if major_version == payload_info.MAJOR_PAYLOAD_VERSION_BRILLO:
-      self.partitions = [FakePartition('root', self.install_operations),
-                         FakePartition('kernel',
-                                       self.kernel_install_operations)]
-      self.install_operations = self.kernel_install_operations = []
+  def __init__(self):
+    self.partitions = [
+        FakePartition(update_payload.common.ROOTFS,
+                      [FakeOp([], [FakeExtent(1, 1), FakeExtent(2, 2)],
+                              update_payload.common.OpType.REPLACE_BZ,
+                              dst_length=3*4096,
+                              data_offset=1,
+                              data_length=1)
+                      ], 1 * 4096, 3 * 4096),
+        FakePartition(update_payload.common.KERNEL,
+                      [FakeOp([FakeExtent(1, 1)],
+                              [FakeExtent(x, x) for x in xrange(20)],
+                              update_payload.common.OpType.SOURCE_COPY,
+                              src_length=4096)
+                      ], 2 * 4096, 4 * 4096),
+    ]
     self.block_size = 4096
     self.minor_version = 4
-    FakePartInfo = collections.namedtuple('FakePartInfo', ['size'])
-    self.old_rootfs_info = FakePartInfo(1 * 4096)
-    self.old_kernel_info = FakePartInfo(2 * 4096)
-    self.new_rootfs_info = FakePartInfo(3 * 4096)
-    self.new_kernel_info = FakePartInfo(4 * 4096)
     self.signatures_offset = None
     self.signatures_size = None
 
@@ -106,23 +111,22 @@
 class FakeHeader(object):
   """Fake payload header for testing."""
 
-  def __init__(self, version, manifest_len, metadata_signature_len):
-    self.version = version
+  def __init__(self, manifest_len, metadata_signature_len):
+    self.version = payload_info.MAJOR_PAYLOAD_VERSION_BRILLO
     self.manifest_len = manifest_len
     self.metadata_signature_len = metadata_signature_len
 
   @property
   def size(self):
-    return (20 if self.version == payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS
-            else 24)
+    return 24
 
 class FakePayload(object):
   """Fake payload for testing."""
 
-  def __init__(self, major_version):
-    self._header = FakeHeader(major_version, 222, 0)
+  def __init__(self):
+    self._header = FakeHeader(222, 0)
     self.header = None
-    self._manifest = FakeManifest(major_version)
+    self._manifest = FakeManifest()
     self.manifest = None
 
     self._blobs = {}
@@ -203,49 +207,22 @@
   def testRun(self):
     """Verify that Run parses and displays the payload like we expect."""
     payload_cmd = payload_info.PayloadCommand(FakeOption(action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
-    expected_out = """Payload version:             1
+    payload = FakePayload()
+    expected_out = """Payload version:             2
 Manifest length:             222
-Number of operations:        1
-Number of kernel ops:        1
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
 Block size:                  4096
 Minor version:               4
 """
     self.TestCommand(payload_cmd, payload, expected_out)
 
-  def testListOpsOnVersion1(self):
-    """Verify that the --list_ops option gives the correct output."""
-    payload_cmd = payload_info.PayloadCommand(
-        FakeOption(list_ops=True, action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
-    expected_out = """Payload version:             1
-Manifest length:             222
-Number of operations:        1
-Number of kernel ops:        1
-Block size:                  4096
-Minor version:               4
-
-Install operations:
-  0: REPLACE_BZ
-    Data offset: 1
-    Data length: 1
-    Destination: 2 extents (3 blocks)
-      (1,1) (2,2)
-Kernel install operations:
-  0: SOURCE_COPY
-    Source: 1 extent (1 block)
-      (1,1)
-    Destination: 20 extents (190 blocks)
-      (0,0) (1,1) (2,2) (3,3) (4,4) (5,5) (6,6) (7,7) (8,8) (9,9) (10,10)
-      (11,11) (12,12) (13,13) (14,14) (15,15) (16,16) (17,17) (18,18) (19,19)
-"""
-    self.TestCommand(payload_cmd, payload, expected_out)
-
   def testListOpsOnVersion2(self):
     """Verify that the --list_ops option gives the correct output."""
     payload_cmd = payload_info.PayloadCommand(
         FakeOption(list_ops=True, action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    payload = FakePayload()
     expected_out = """Payload version:             2
 Manifest length:             222
 Number of partitions:        2
@@ -270,28 +247,11 @@
 """
     self.TestCommand(payload_cmd, payload, expected_out)
 
-  def testStatsOnVersion1(self):
-    """Verify that the --stats option works correctly."""
-    payload_cmd = payload_info.PayloadCommand(
-        FakeOption(stats=True, action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
-    expected_out = """Payload version:             1
-Manifest length:             222
-Number of operations:        1
-Number of kernel ops:        1
-Block size:                  4096
-Minor version:               4
-Blocks read:                 11
-Blocks written:              193
-Seeks when writing:          18
-"""
-    self.TestCommand(payload_cmd, payload, expected_out)
-
   def testStatsOnVersion2(self):
     """Verify that the --stats option works correctly on version 2."""
     payload_cmd = payload_info.PayloadCommand(
         FakeOption(stats=True, action='show'))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    payload = FakePayload()
     expected_out = """Payload version:             2
 Manifest length:             222
 Number of partitions:        2
@@ -309,11 +269,12 @@
     """Verify that the --signatures option works with unsigned payloads."""
     payload_cmd = payload_info.PayloadCommand(
         FakeOption(action='show', signatures=True))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_CHROMEOS)
-    expected_out = """Payload version:             1
+    payload = FakePayload()
+    expected_out = """Payload version:             2
 Manifest length:             222
-Number of operations:        1
-Number of kernel ops:        1
+Number of partitions:        2
+  Number of "root" ops:      1
+  Number of "kernel" ops:    1
 Block size:                  4096
 Minor version:               4
 No metadata signatures stored in the payload
@@ -325,7 +286,7 @@
     """Verify that the --signatures option shows the present signatures."""
     payload_cmd = payload_info.PayloadCommand(
         FakeOption(action='show', signatures=True))
-    payload = FakePayload(payload_info.MAJOR_PAYLOAD_VERSION_BRILLO)
+    payload = FakePayload()
     payload.AddPayloadSignature(version=1,
                                 data='12345678abcdefgh\x00\x01\x02\x03')
     payload.AddPayloadSignature(data='I am a signature so access is yes.')
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index 3f64444..511ed49 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -29,7 +29,6 @@
 import array
 import bz2
 import hashlib
-import itertools
 # Not everywhere we can have the lzma library so we ignore it if we didn't have
 # it because it is not going to be used. For example, 'cros flash' uses
 # devserver code which eventually loads this file, but the lzma library is not
@@ -45,7 +44,6 @@
   except ImportError:
     pass
 import os
-import shutil
 import subprocess
 import sys
 import tempfile
@@ -116,12 +114,8 @@
       break
     read_length = min(max_length, ex.num_blocks * block_size)
 
-    # Fill with zeros or read from file, depending on the type of extent.
-    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
-      data.extend(itertools.repeat('\0', read_length))
-    else:
-      file_obj.seek(ex.start_block * block_size)
-      data.fromfile(file_obj, read_length)
+    file_obj.seek(ex.start_block * block_size)
+    data.fromfile(file_obj, read_length)
 
     max_length -= read_length
 
@@ -150,11 +144,9 @@
       raise PayloadError('%s: more write extents than data' % ex_name)
     write_length = min(data_length, ex.num_blocks * block_size)
 
-    # Only do actual writing if this is not a pseudo-extent.
-    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
-      file_obj.seek(ex.start_block * block_size)
-      data_view = buffer(data, data_offset, write_length)
-      file_obj.write(data_view)
+    file_obj.seek(ex.start_block * block_size)
+    data_view = buffer(data, data_offset, write_length)
+    file_obj.write(data_view)
 
     data_offset += write_length
     data_length -= write_length
@@ -189,15 +181,12 @@
     if not data_length:
       raise PayloadError('%s: more extents than total data length' % ex_name)
 
-    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
-    start_byte = -1 if is_pseudo else ex.start_block * block_size
+    start_byte = ex.start_block * block_size
     num_bytes = ex.num_blocks * block_size
     if data_length < num_bytes:
       # The data doesn't fill the extent; record where zero padding is needed.
-      if not is_pseudo:
-        pad_off = start_byte + data_length
-        pad_len = num_bytes - data_length
-
+      pad_off = start_byte + data_length
+      pad_len = num_bytes - data_length
       num_bytes = data_length
 
     arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
@@ -274,30 +263,28 @@
       num_blocks = ex.num_blocks
       count = num_blocks * block_size
 
-      # Make sure it's not a fake (signature) operation.
-      if start_block != common.PSEUDO_EXTENT_MARKER:
-        data_end = data_start + count
+      data_end = data_start + count
 
-        # Make sure we're not running past partition boundary.
-        if (start_block + num_blocks) * block_size > part_size:
-          raise PayloadError(
-              '%s: extent (%s) exceeds partition size (%d)' %
-              (ex_name, common.FormatExtent(ex, block_size),
-               part_size))
+      # Make sure we're not running past partition boundary.
+      if (start_block + num_blocks) * block_size > part_size:
+        raise PayloadError(
+            '%s: extent (%s) exceeds partition size (%d)' %
+            (ex_name, common.FormatExtent(ex, block_size),
+             part_size))
 
-        # Make sure that we have enough data to write.
-        if data_end >= data_length + block_size:
-          raise PayloadError(
-              '%s: more dst blocks than data (even with padding)')
+      # Make sure that we have enough data to write.
+      if data_end >= data_length + block_size:
+        raise PayloadError(
+            '%s: more dst blocks than data (even with padding)')
 
-        # Pad with zeros if necessary.
-        if data_end > data_length:
-          padding = data_end - data_length
-          out_data += '\0' * padding
+      # Pad with zeros if necessary.
+      if data_end > data_length:
+        padding = data_end - data_length
+        out_data += '\0' * padding
 
-        self.payload.payload_file.seek(start_block * block_size)
-        part_file.seek(start_block * block_size)
-        part_file.write(out_data[data_start:data_end])
+      self.payload.payload_file.seek(start_block * block_size)
+      part_file.seek(start_block * block_size)
+      part_file.write(out_data[data_start:data_end])
 
       data_start += count
 
@@ -323,10 +310,8 @@
     # Iterate over the extents and write zero.
     # pylint: disable=unused-variable
     for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
-      # Only do actual writing if this is not a pseudo-extent.
-      if ex.start_block != common.PSEUDO_EXTENT_MARKER:
-        part_file.seek(ex.start_block * block_size)
-        part_file.write('\0' * (ex.num_blocks * block_size))
+      part_file.seek(ex.start_block * block_size)
+      part_file.write('\0' * (ex.num_blocks * block_size))
 
   def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
                                 new_part_file):
@@ -597,20 +582,11 @@
     install_operations = []
 
     manifest = self.payload.manifest
-    if self.payload.header.version == 1:
-      for real_name, proto_name in common.CROS_PARTITIONS:
-        new_part_info[real_name] = getattr(manifest, 'new_%s_info' % proto_name)
-        old_part_info[real_name] = getattr(manifest, 'old_%s_info' % proto_name)
-
-      install_operations.append((common.ROOTFS, manifest.install_operations))
-      install_operations.append((common.KERNEL,
-                                 manifest.kernel_install_operations))
-    else:
-      for part in manifest.partitions:
-        name = part.partition_name
-        new_part_info[name] = part.new_partition_info
-        old_part_info[name] = part.old_partition_info
-        install_operations.append((name, part.operations))
+    for part in manifest.partitions:
+      name = part.partition_name
+      new_part_info[name] = part.new_partition_info
+      old_part_info[name] = part.old_partition_info
+      install_operations.append((name, part.operations))
 
     part_names = set(new_part_info.keys())  # Equivalently, old_part_info.keys()
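
With the pseudo-extent marker gone, every extent in the manifest refers to real
blocks, so the readers and writers in this file no longer need a sparse-hole
branch. Condensed into a self-contained sketch of the simplified read path
(Python 2, mirroring _ReadExtents above; illustrative only):

  import array

  def read_extents(file_obj, extents, block_size, max_length):
      # Read at most |max_length| bytes, extent by extent; there are no
      # pseudo-extents to zero-fill any more.
      data = array.array('c')
      for ex in extents:
          if max_length == 0:
              break
          read_length = min(max_length, ex.num_blocks * block_size)
          file_obj.seek(ex.start_block * block_size)
          data.fromfile(file_obj, read_length)
          max_length -= read_length
      return data
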
 
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 674d9f4..4558872 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -45,11 +45,9 @@
 # Constants.
 #
 
-_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
 _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
 _CHECK_PAYLOAD_SIG = 'payload-sig'
 CHECKS_TO_DISABLE = (
-    _CHECK_DST_PSEUDO_EXTENTS,
     _CHECK_MOVE_SAME_SRC_DST_BLOCK,
     _CHECK_PAYLOAD_SIG,
 )
@@ -320,8 +318,6 @@
     self.allow_unhashed = allow_unhashed
 
     # Disable specific tests.
-    self.check_dst_pseudo_extents = (
-        _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
     self.check_move_same_src_dst_block = (
         _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
     self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
@@ -625,35 +621,23 @@
     self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                           'signatures_offset', 'signatures_size', 'manifest')
 
-    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-      for real_name, proto_name in common.CROS_PARTITIONS:
-        self.old_part_info[real_name] = self._CheckOptionalSubMsg(
-            manifest, 'old_%s_info' % proto_name, report)
-        self.new_part_info[real_name] = self._CheckMandatorySubMsg(
-            manifest, 'new_%s_info' % proto_name, report, 'manifest')
+    for part in manifest.partitions:
+      name = part.partition_name
+      self.old_part_info[name] = self._CheckOptionalSubMsg(
+          part, 'old_partition_info', report)
+      self.new_part_info[name] = self._CheckMandatorySubMsg(
+          part, 'new_partition_info', report, 'manifest.partitions')
 
-      # Check: old_kernel_info <==> old_rootfs_info.
-      self._CheckPresentIff(self.old_part_info[common.KERNEL].msg,
-                            self.old_part_info[common.ROOTFS].msg,
-                            'old_kernel_info', 'old_rootfs_info', 'manifest')
-    else:
-      for part in manifest.partitions:
-        name = part.partition_name
-        self.old_part_info[name] = self._CheckOptionalSubMsg(
-            part, 'old_partition_info', report)
-        self.new_part_info[name] = self._CheckMandatorySubMsg(
-            part, 'new_partition_info', report, 'manifest.partitions')
+    # Check: Old-style partition infos should not be specified.
+    for _, part in common.CROS_PARTITIONS:
+      self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
+      self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
 
-      # Check: Old-style partition infos should not be specified.
-      for _, part in common.CROS_PARTITIONS:
-        self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
-        self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
-
-      # Check: If old_partition_info is specified anywhere, it must be
-      # specified everywhere.
-      old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
-      self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
-                                'manifest.partitions')
+    # Check: If old_partition_info is specified anywhere, it must be
+    # specified everywhere.
+    old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
+    self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
+                              'manifest.partitions')
 
     is_delta = any(part and part.msg for part in self.old_part_info.values())
     if is_delta:
@@ -721,8 +705,7 @@
     self._CheckBlocksFitLength(length, total_blocks, self.block_size,
                                '%s: %s' % (op_name, length_name))
 
-  def _CheckExtents(self, extents, usable_size, block_counters, name,
-                    allow_pseudo=False, allow_signature=False):
+  def _CheckExtents(self, extents, usable_size, block_counters, name):
     """Checks a sequence of extents.
 
     Args:
@@ -730,8 +713,6 @@
       usable_size: The usable size of the partition to which the extents apply.
       block_counters: Array of counters corresponding to the number of blocks.
       name: The name of the extent block.
-      allow_pseudo: Whether or not pseudo block numbers are allowed.
-      allow_signature: Whether or not the extents are used for a signature.
 
     Returns:
       The total number of blocks in the extents.
@@ -752,20 +733,15 @@
       if num_blocks == 0:
         raise error.PayloadError('%s: extent length is zero.' % ex_name)
 
-      if start_block != common.PSEUDO_EXTENT_MARKER:
-        # Check: Make sure we're within the partition limit.
-        if usable_size and end_block * self.block_size > usable_size:
-          raise error.PayloadError(
-              '%s: extent (%s) exceeds usable partition size (%d).' %
-              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
+      # Check: Make sure we're within the partition limit.
+      if usable_size and end_block * self.block_size > usable_size:
+        raise error.PayloadError(
+            '%s: extent (%s) exceeds usable partition size (%d).' %
+            (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
 
-        # Record block usage.
-        for i in xrange(start_block, end_block):
-          block_counters[i] += 1
-      elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
-        # Pseudo-extents must be allowed explicitly, or otherwise be part of a
-        # signature operation (in which case there has to be exactly one).
-        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
+      # Record block usage.
+      for i in xrange(start_block, end_block):
+        block_counters[i] += 1
 
       total_num_blocks += num_blocks
 
@@ -896,21 +872,19 @@
     if self.minor_version >= 3 and op.src_sha256_hash is None:
       raise error.PayloadError('%s: source hash missing.' % op_name)
 
-  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
-                      new_block_counters, old_usable_size, new_usable_size,
-                      prev_data_offset, allow_signature, blob_hash_counts):
+  def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters,
+                      old_usable_size, new_usable_size, prev_data_offset,
+                      blob_hash_counts):
     """Checks a single update operation.
 
     Args:
       op: The operation object.
       op_name: Operation name string for error reporting.
-      is_last: Whether this is the last operation in the sequence.
       old_block_counters: Arrays of block read counters.
       new_block_counters: Arrays of block write counters.
       old_usable_size: The overall usable size for src data in bytes.
       new_usable_size: The overall usable size for dst data in bytes.
       prev_data_offset: Offset of last used data bytes.
-      allow_signature: Whether this may be a signature operation.
       blob_hash_counts: Counters for hashed/unhashed blobs.
 
     Returns:
@@ -922,14 +896,10 @@
     # Check extents.
     total_src_blocks = self._CheckExtents(
         op.src_extents, old_usable_size, old_block_counters,
-        op_name + '.src_extents', allow_pseudo=True)
-    allow_signature_in_extents = (allow_signature and is_last and
-                                  op.type == common.OpType.REPLACE)
+        op_name + '.src_extents')
     total_dst_blocks = self._CheckExtents(
         op.dst_extents, new_usable_size, new_block_counters,
-        op_name + '.dst_extents',
-        allow_pseudo=(not self.check_dst_pseudo_extents),
-        allow_signature=allow_signature_in_extents)
+        op_name + '.dst_extents')
 
     # Check: data_offset present <==> data_length present.
     data_offset = self._CheckOptionalField(op, 'data_offset', None)
@@ -965,9 +935,7 @@
             (op_name, common.FormatSha256(op.data_sha256_hash),
              common.FormatSha256(actual_hash.digest())))
     elif data_offset is not None:
-      if allow_signature_in_extents:
-        blob_hash_counts['signature'] += 1
-      elif self.allow_unhashed:
+      if self.allow_unhashed:
         blob_hash_counts['unhashed'] += 1
       else:
         raise error.PayloadError('%s: unhashed operation not allowed.' %
@@ -981,11 +949,8 @@
             (op_name, data_offset, prev_data_offset))
 
     # Type-specific checks.
-    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
-      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
-    elif (op.type == common.OpType.REPLACE_XZ and
-          (self.minor_version >= 3 or
-           self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)):
+    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+                   common.OpType.REPLACE_XZ):
       self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
     elif op.type == common.OpType.ZERO and self.minor_version >= 4:
       self._CheckZeroOperation(op, op_name)
@@ -1030,7 +995,7 @@
 
   def _CheckOperations(self, operations, report, base_name, old_fs_size,
                        new_fs_size, old_usable_size, new_usable_size,
-                       prev_data_offset, allow_signature):
+                       prev_data_offset):
     """Checks a sequence of update operations.
 
     Args:
@@ -1042,7 +1007,6 @@
       old_usable_size: The overall usable size of the old partition in bytes.
       new_usable_size: The overall usable size of the new partition in bytes.
       prev_data_offset: Offset of last used data bytes.
-      allow_signature: Whether this sequence may contain signature operations.
 
     Returns:
       The total data blob size used.
@@ -1078,8 +1042,6 @@
         'hashed': 0,
         'unhashed': 0,
     }
-    if allow_signature:
-      blob_hash_counts['signature'] = 0
 
     # Allocate old and new block counters.
     old_block_counters = (self._AllocBlockCounters(old_usable_size)
@@ -1096,12 +1058,10 @@
         raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
       op_counts[op.type] += 1
 
-      is_last = op_num == len(operations)
       curr_data_used = self._CheckOperation(
-          op, op_name, is_last, old_block_counters, new_block_counters,
+          op, op_name, old_block_counters, new_block_counters,
           old_usable_size, new_usable_size,
-          prev_data_offset + total_data_used, allow_signature,
-          blob_hash_counts)
+          prev_data_offset + total_data_used, blob_hash_counts)
       if curr_data_used:
         op_blob_totals[op.type] += curr_data_used
         total_data_used += curr_data_used
@@ -1155,21 +1115,18 @@
     if not sigs.signatures:
       raise error.PayloadError('Signature block is empty.')
 
-    last_ops_section = (self.payload.manifest.kernel_install_operations or
-                        self.payload.manifest.install_operations)
-
-    # Only major version 1 has the fake signature OP at the end.
-    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-      fake_sig_op = last_ops_section[-1]
+    # Check that the payload does not end with the fake signature operation
+    # that major version 1 used to append.
+    last_partition = self.payload.manifest.partitions[-1]
+    if last_partition.operations:
+      last_op = last_partition.operations[-1]
       # Check: signatures_{offset,size} must not match the last operation; a
       # match means the old fake signature operation is still present.
-      if not (fake_sig_op.type == common.OpType.REPLACE and
-              self.sigs_offset == fake_sig_op.data_offset and
-              self.sigs_size == fake_sig_op.data_length):
-        raise error.PayloadError('Signatures_{offset,size} (%d+%d) does not'
-                                 ' match last operation (%d+%d).' %
-                                 (self.sigs_offset, self.sigs_size,
-                                  fake_sig_op.data_offset,
-                                  fake_sig_op.data_length))
+      if (last_op.type == common.OpType.REPLACE and
+          last_op.data_offset == self.sigs_offset and
+          last_op.data_length == self.sigs_size):
+        raise error.PayloadError('The last operation points at the signature '
+                                 'blob (major version 1 fake signature op); '
+                                 'this is an invalid payload.')
 
     # Compute the checksum of all data up to signature blob.
     # TODO(garnold) we're re-reading the whole data section into a string
@@ -1248,29 +1205,17 @@
       self._CheckManifest(report, part_sizes)
       assert self.payload_type, 'payload type should be known by now'
 
-      manifest = self.payload.manifest
-
-      # Part 3: Examine partition operations.
-      install_operations = []
-      if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-        # partitions field should not ever exist in major version 1 payloads
-        self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest')
-
-        install_operations.append((common.ROOTFS, manifest.install_operations))
-        install_operations.append((common.KERNEL,
-                                   manifest.kernel_install_operations))
-
-      else:
-        self._CheckRepeatedElemNotPresent(manifest, 'install_operations',
+      # Make sure the deprecated major version 1 fields are not present in
+      # the payload.
+      for field in ('install_operations', 'kernel_install_operations'):
+        self._CheckRepeatedElemNotPresent(self.payload.manifest, field,
                                           'manifest')
-        self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations',
-                                          'manifest')
-
-        for update in manifest.partitions:
-          install_operations.append((update.partition_name, update.operations))
+      for field in ('old_kernel_info', 'old_rootfs_info',
+                    'new_kernel_info', 'new_rootfs_info'):
+        self._CheckElemNotPresent(self.payload.manifest, field, 'manifest')
 
       total_blob_size = 0
-      for part, operations in install_operations:
+      for part, operations in ((p.partition_name, p.operations)
+                               for p in self.payload.manifest.partitions):
         report.AddSection('%s operations' % part)
 
         new_fs_usable_size = self.new_fs_sizes[part]
@@ -1285,16 +1230,13 @@
         total_blob_size += self._CheckOperations(
             operations, report, '%s_install_operations' % part,
             self.old_fs_sizes[part], self.new_fs_sizes[part],
-            old_fs_usable_size, new_fs_usable_size, total_blob_size,
-            (self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION
-             and part == common.KERNEL))
+            old_fs_usable_size, new_fs_usable_size, total_blob_size)
 
       # Check: Operations data reach the end of the payload file.
       used_payload_size = self.payload.data_offset + total_blob_size
       # Major versions 2 and higher have a signature at the end, so it should be
       # considered in the total size of the image.
-      if (self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION and
-          self.sigs_size):
+      if self.sigs_size:
         used_payload_size += self.sigs_size
 
       if used_payload_size != payload_file_size:
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index b5f2f3e..4881653 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -427,10 +427,10 @@
       payload_gen.SetBlockSize(test_utils.KiB(4))
 
     # Add some operations.
-    payload_gen.AddOperation(False, common.OpType.SOURCE_COPY,
+    payload_gen.AddOperation(common.ROOTFS, common.OpType.SOURCE_COPY,
                              src_extents=[(0, 16), (16, 497)],
                              dst_extents=[(16, 496), (0, 16)])
-    payload_gen.AddOperation(True, common.OpType.SOURCE_COPY,
+    payload_gen.AddOperation(common.KERNEL, common.OpType.SOURCE_COPY,
                              src_extents=[(0, 8), (8, 8)],
                              dst_extents=[(8, 8), (0, 8)])
 
@@ -456,19 +456,21 @@
     if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:
       oki_hash = (None if fail_bad_oki
                   else hashlib.sha256('fake-oki-content').digest())
-      payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)
+      payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size,
+                              oki_hash)
     if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or
                                         fail_bad_ori):
       ori_hash = (None if fail_bad_ori
                   else hashlib.sha256('fake-ori-content').digest())
-      payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)
+      payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size,
+                              ori_hash)
 
     # Add new kernel/rootfs partition info.
     payload_gen.SetPartInfo(
-        True, True, new_kernel_fs_size,
+        common.KERNEL, True, new_kernel_fs_size,
         None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())
     payload_gen.SetPartInfo(
-        False, True, new_rootfs_fs_size,
+        common.ROOTFS, True, new_rootfs_fs_size,
         None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())
 
     # Set the minor version.
@@ -521,23 +523,6 @@
         payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
                                       collections.defaultdict(int), 'foo'))
 
-    # Passes w/ pseudo-extents (aka sparse holes).
-    extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5),
-                                 (8, 3))
-    self.assertEquals(
-        12,
-        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
-                                      collections.defaultdict(int), 'foo',
-                                      allow_pseudo=True))
-
-    # Passes w/ pseudo-extent due to a signature.
-    extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2))
-    self.assertEquals(
-        2,
-        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
-                                      collections.defaultdict(int), 'foo',
-                                      allow_signature=True))
-
     # Fails, extent missing a start block.
     extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16))
     self.assertRaises(
@@ -704,8 +689,8 @@
     self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,
                       None, 0, 1, 'foo')
 
-  def DoCheckOperationTest(self, op_type_name, is_last, allow_signature,
-                           allow_unhashed, fail_src_extents, fail_dst_extents,
+  def DoCheckOperationTest(self, op_type_name, allow_unhashed,
+                           fail_src_extents, fail_dst_extents,
                            fail_mismatched_data_offset_length,
                            fail_missing_dst_extents, fail_src_length,
                            fail_dst_length, fail_data_hash,
@@ -715,8 +700,6 @@
     Args:
       op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
         'SOURCE_COPY', 'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'.
-      is_last: Whether we're testing the last operation in a sequence.
-      allow_signature: Whether we're testing a signature-capable operation.
       allow_unhashed: Whether we're allowing to not hash the data.
       fail_src_extents: Tamper with src extents.
       fail_dst_extents: Tamper with dst extents.
@@ -762,8 +745,7 @@
                           self.NewExtentList((1, 16)))
         total_src_blocks = 16
 
-    # TODO(tbrindus): add major version 2 tests.
-    payload_checker.major_version = common.CHROMEOS_MAJOR_PAYLOAD_VERSION
+    payload_checker.major_version = common.BRILLO_MAJOR_PAYLOAD_VERSION
     if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
       payload_checker.minor_version = 0
     elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
@@ -785,13 +767,11 @@
         op.data_offset = prev_data_offset
 
       fake_data = 'fake-data'.ljust(op.data_length)
-      if not (allow_unhashed or (is_last and allow_signature and
-                                 op_type == common.OpType.REPLACE)):
-        if not fail_data_hash:
-          # Create a valid data blob hash.
-          op.data_sha256_hash = hashlib.sha256(fake_data).digest()
-          payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
-              fake_data)
+      if not allow_unhashed and not fail_data_hash:
+        # Create a valid data blob hash.
+        op.data_sha256_hash = hashlib.sha256(fake_data).digest()
+        payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
+            fake_data)
 
       elif fail_data_hash:
         # Create an invalid data blob hash.
@@ -833,8 +813,8 @@
                    fail_missing_dst_extents or fail_src_length or
                    fail_dst_length or fail_data_hash or fail_prev_data_offset or
                    fail_bad_minor_version)
-    args = (op, 'foo', is_last, old_block_counters, new_block_counters,
-            old_part_size, new_part_size, prev_data_offset, allow_signature,
+    args = (op, 'foo', old_block_counters, new_block_counters,
+            old_part_size, new_part_size, prev_data_offset,
             blob_hash_counts)
     if should_fail:
       self.assertRaises(PayloadError, payload_checker._CheckOperation, *args)
@@ -876,7 +856,7 @@
     if fail_nonexhaustive_full_update:
       rootfs_data_length -= block_size
 
-    payload_gen.AddOperation(False, rootfs_op_type,
+    payload_gen.AddOperation(common.ROOTFS, rootfs_op_type,
                              dst_extents=[(0, rootfs_data_length / block_size)],
                              data_offset=0,
                              data_length=rootfs_data_length)
@@ -887,17 +867,17 @@
                                              'allow_unhashed': True})
     payload_checker.payload_type = checker._TYPE_FULL
     report = checker._PayloadReport()
-
-    args = (payload_checker.payload.manifest.install_operations, report, 'foo',
-            0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0, False)
+    partition = next((p for p in payload_checker.payload.manifest.partitions
+                      if p.partition_name == common.ROOTFS), None)
+    args = (partition.operations, report, 'foo',
+            0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0)
     if fail_nonexhaustive_full_update:
       self.assertRaises(PayloadError, payload_checker._CheckOperations, *args)
     else:
       self.assertEqual(rootfs_data_length,
                        payload_checker._CheckOperations(*args))
 
-  def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op,
-                            fail_mismatched_pseudo_op, fail_sig_missing_fields,
+  def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields,
                             fail_unknown_sig_version, fail_incorrect_sig):
     """Tests _CheckSignatures()."""
     # Generate a test payload. For this test, we only care about the signature
@@ -908,20 +888,18 @@
     payload_gen.SetBlockSize(block_size)
     rootfs_part_size = test_utils.MiB(2)
     kernel_part_size = test_utils.KiB(16)
-    payload_gen.SetPartInfo(False, True, rootfs_part_size,
+    payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size,
                             hashlib.sha256('fake-new-rootfs-content').digest())
-    payload_gen.SetPartInfo(True, True, kernel_part_size,
+    payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size,
                             hashlib.sha256('fake-new-kernel-content').digest())
     payload_gen.SetMinorVersion(0)
     payload_gen.AddOperationWithData(
-        False, common.OpType.REPLACE,
+        common.ROOTFS, common.OpType.REPLACE,
         dst_extents=[(0, rootfs_part_size / block_size)],
         data_blob=os.urandom(rootfs_part_size))
 
-    do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op)
-    do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or
-                          fail_sig_missing_fields or fail_unknown_sig_version
-                          or fail_incorrect_sig)
+    do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or
+                          fail_unknown_sig_version or fail_incorrect_sig)
 
     sigs_data = None
     if do_forge_sigs_data:
@@ -937,22 +915,12 @@
       sigs_data = sigs_gen.ToBinary()
       payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data))
 
-    if do_forge_pseudo_op:
-      assert sigs_data is not None, 'should have forged signatures blob by now'
-      sigs_len = len(sigs_data)
-      payload_gen.AddOperation(
-          False, common.OpType.REPLACE,
-          data_offset=payload_gen.curr_offset / 2,
-          data_length=sigs_len / 2,
-          dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)])
-
     # Generate payload (complete w/ signature) and create the test object.
     payload_checker = _GetPayloadChecker(
         payload_gen.WriteToFileWithData,
         payload_gen_dargs={
             'sigs_data': sigs_data,
-            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
-            'do_add_pseudo_operation': not do_forge_pseudo_op})
+            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME})
     payload_checker.payload_type = checker._TYPE_FULL
     report = checker._PayloadReport()
 
@@ -962,8 +930,7 @@
         common.KERNEL: kernel_part_size
     })
 
-    should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
-                   fail_mismatched_pseudo_op or fail_sig_missing_fields or
+    should_fail = (fail_empty_sigs_blob or fail_sig_missing_fields or
                    fail_unknown_sig_version or fail_incorrect_sig)
     args = (report, test_utils._PUBKEY_FILE_NAME)
     if should_fail:
@@ -1016,9 +983,9 @@
     payload_gen.SetBlockSize(block_size)
     kernel_filesystem_size = test_utils.KiB(16)
     rootfs_filesystem_size = test_utils.MiB(2)
-    payload_gen.SetPartInfo(False, True, rootfs_filesystem_size,
+    payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size,
                             hashlib.sha256('fake-new-rootfs-content').digest())
-    payload_gen.SetPartInfo(True, True, kernel_filesystem_size,
+    payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size,
                             hashlib.sha256('fake-new-kernel-content').digest())
     payload_gen.SetMinorVersion(0)
 
@@ -1029,7 +996,7 @@
     if fail_rootfs_part_size_exceeded:
       rootfs_op_size += block_size
     payload_gen.AddOperationWithData(
-        False, common.OpType.REPLACE,
+        common.ROOTFS, common.OpType.REPLACE,
         dst_extents=[(0, rootfs_op_size / block_size)],
         data_blob=os.urandom(rootfs_op_size))
 
@@ -1040,7 +1007,7 @@
     if fail_kernel_part_size_exceeded:
       kernel_op_size += block_size
     payload_gen.AddOperationWithData(
-        True, common.OpType.REPLACE,
+        common.KERNEL, common.OpType.REPLACE,
         dst_extents=[(0, kernel_op_size / block_size)],
         data_blob=os.urandom(kernel_op_size))
 
@@ -1052,16 +1019,14 @@
     else:
       use_block_size = block_size
 
-    # For the unittests 246 is the value that generated for the payload.
-    metadata_size = 246
+    # For the unittests, 237 is the metadata size generated for the payload.
+    metadata_size = 237
     if fail_mismatched_metadata_size:
       metadata_size += 1
 
     kwargs = {
         'payload_gen_dargs': {
             'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
-            'do_add_pseudo_operation': True,
-            'is_pseudo_in_kernel': True,
             'padding': os.urandom(1024) if fail_excess_data else None},
         'checker_init_dargs': {
             'assert_type': 'delta' if fail_wrong_payload_type else 'full',
@@ -1073,7 +1038,7 @@
       payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
                                            **kwargs)
 
-      kwargs = {
+      kwargs2 = {
           'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
           'metadata_size': metadata_size,
           'part_sizes': {
@@ -1085,15 +1050,14 @@
                      fail_rootfs_part_size_exceeded or
                      fail_kernel_part_size_exceeded)
       if should_fail:
-        self.assertRaises(PayloadError, payload_checker.Run, **kwargs)
+        self.assertRaises(PayloadError, payload_checker.Run, **kwargs2)
       else:
-        self.assertIsNone(payload_checker.Run(**kwargs))
+        self.assertIsNone(payload_checker.Run(**kwargs2))
 
 # This implements a generic API, hence the occasional unused args.
 # pylint: disable=W0613
-def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,
-                               allow_unhashed, fail_src_extents,
-                               fail_dst_extents,
+def ValidateCheckOperationTest(op_type_name, allow_unhashed,
+                               fail_src_extents, fail_dst_extents,
                                fail_mismatched_data_offset_length,
                                fail_missing_dst_extents, fail_src_length,
                                fail_dst_length, fail_data_hash,
@@ -1147,7 +1111,7 @@
     run_method_name = 'Do%sTest' % tested_method_name
     test_method_name = 'test%s' % tested_method_name
     for arg_key, arg_val in run_dargs.iteritems():
-      if arg_val or type(arg_val) is int:
+      if arg_val or isinstance(arg_val, int):
         test_method_name += '__%s=%s' % (arg_key, arg_val)
     setattr(PayloadCheckerTest, test_method_name,
             TestMethodBody(run_method_name, run_dargs))
@@ -1196,8 +1160,6 @@
                      {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
                                        'SOURCE_COPY', 'SOURCE_BSDIFF',
                                        'PUFFDIFF', 'BROTLI_BSDIFF'),
-                      'is_last': (True, False),
-                      'allow_signature': (True, False),
                       'allow_unhashed': (True, False),
                       'fail_src_extents': (True, False),
                       'fail_dst_extents': (True, False),
@@ -1217,8 +1179,6 @@
   # Add all _CheckOperations() test cases.
   AddParametricTests('CheckSignatures',
                      {'fail_empty_sigs_blob': (True, False),
-                      'fail_missing_pseudo_op': (True, False),
-                      'fail_mismatched_pseudo_op': (True, False),
                       'fail_sig_missing_fields': (True, False),
                       'fail_unknown_sig_version': (True, False),
                       'fail_incorrect_sig': (True, False)})
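
For reference, a rough, self-contained sketch of how the parametric test names above expand now that the pseudo-op failure modes are dropped; itertools.product stands in here for whatever AddParametricTests (not shown in full in this diff) uses to walk the argument space, and only a subset of the CheckSignatures arguments is used for brevity:

  import itertools

  # Subset of the simplified CheckSignatures argument space.
  run_dargs_space = {'fail_empty_sigs_blob': (True, False),
                     'fail_incorrect_sig': (True, False)}

  keys = sorted(run_dargs_space)
  for values in itertools.product(*(run_dargs_space[k] for k in keys)):
    test_method_name = 'testCheckSignatures'
    for arg_key, arg_val in zip(keys, values):
      # Same filter as the hunk above; isinstance(False, int) is True, so
      # boolean False values also show up in the generated method name
      # (unlike with the old type(arg_val) is int check).
      if arg_val or isinstance(arg_val, int):
        test_method_name += '__%s=%s' % (arg_key, arg_val)
    print(test_method_name)
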
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index b7b53dc..dfb8181 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -25,15 +25,12 @@
 #
 # Constants.
 #
-PSEUDO_EXTENT_MARKER = (1L << 64) - 1  # UINT64_MAX
-
 SIG_ASN1_HEADER = (
     '\x30\x31\x30\x0d\x06\x09\x60\x86'
     '\x48\x01\x65\x03\x04\x02\x01\x05'
     '\x00\x04\x20'
 )
 
-CHROMEOS_MAJOR_PAYLOAD_VERSION = 1
 BRILLO_MAJOR_PAYLOAD_VERSION = 2
 
 SOURCE_MINOR_PAYLOAD_VERSION = 2
@@ -162,8 +159,7 @@
   end_block = ex.start_block + ex.num_blocks
   if block_size:
     return '%d->%d * %d' % (ex.start_block, end_block, block_size)
-  else:
-    return '%d->%d' % (ex.start_block, end_block)
+  return '%d->%d' % (ex.start_block, end_block)
 
 
 def FormatSha256(digest):
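
A quick illustration of the early-return form of the extent formatter above; the signature is assumed to be FormatExtent(ex, block_size) with the extent first, which is not visible in this hunk, and a duck-typed fake extent stands in for the proto message:

  from update_payload import common

  class _FakeExtent(object):
    def __init__(self, start_block, num_blocks):
      self.start_block = start_block
      self.num_blocks = num_blocks

  ex = _FakeExtent(10, 4)
  print(common.FormatExtent(ex, 4096))  # '10->14 * 4096'
  print(common.FormatExtent(ex, 0))     # '10->14'
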
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 2a0cb58..1ed5f99 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -263,9 +263,7 @@
   def IsDelta(self):
     """Returns True iff the payload appears to be a delta."""
     self._AssertInit()
-    return (self.manifest.HasField('old_kernel_info') or
-            self.manifest.HasField('old_rootfs_info') or
-            any(partition.HasField('old_partition_info')
+    return (any(partition.HasField('old_partition_info')
                 for partition in self.manifest.partitions))
 
   def IsFull(self):
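
A minimal sketch of what the simplified delta check now keys on, using only the major-version-2 partitions field; the import path mirrors the other update_payload scripts and is an assumption here:

  from update_payload import update_metadata_pb2

  manifest = update_metadata_pb2.DeltaArchiveManifest()
  part = manifest.partitions.add()
  part.partition_name = 'root'
  # No old_partition_info yet, so this reads as a full payload.
  print(any(p.HasField('old_partition_info') for p in manifest.partitions))  # False
  part.old_partition_info.size = 2 * 1024 * 1024
  # With source partition info present, the payload is treated as a delta.
  print(any(p.HasField('old_partition_info') for p in manifest.partitions))  # True
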
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index f0edad5..4f5fed0 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -173,31 +173,37 @@
     self.block_size = block_size
     _SetMsgField(self.manifest, 'block_size', block_size)
 
-  def SetPartInfo(self, is_kernel, is_new, part_size, part_hash):
+  def SetPartInfo(self, part_name, is_new, part_size, part_hash):
     """Set the partition info entry.
 
     Args:
-      is_kernel: whether this is kernel partition info
-      is_new: whether to set old (False) or new (True) info
-      part_size: the partition size (in fact, filesystem size)
-      part_hash: the partition hash
+      part_name: The name of the partition.
+      is_new: Whether to set old (False) or new (True) info.
+      part_size: The partition size (in fact, filesystem size).
+      part_hash: The partition hash.
     """
-    if is_kernel:
-      part_info = (self.manifest.new_kernel_info if is_new
-                   else self.manifest.old_kernel_info)
-    else:
-      part_info = (self.manifest.new_rootfs_info if is_new
-                   else self.manifest.old_rootfs_info)
+    partition = next((x for x in self.manifest.partitions
+                      if x.partition_name == part_name), None)
+    if partition is None:
+      partition = self.manifest.partitions.add()
+      partition.partition_name = part_name
+
+    part_info = (partition.new_partition_info if is_new
+                 else partition.old_partition_info)
     _SetMsgField(part_info, 'size', part_size)
     _SetMsgField(part_info, 'hash', part_hash)
 
-  def AddOperation(self, is_kernel, op_type, data_offset=None,
+  def AddOperation(self, part_name, op_type, data_offset=None,
                    data_length=None, src_extents=None, src_length=None,
                    dst_extents=None, dst_length=None, data_sha256_hash=None):
     """Adds an InstallOperation entry."""
-    operations = (self.manifest.kernel_install_operations if is_kernel
-                  else self.manifest.install_operations)
+    partition = next((x for x in self.manifest.partitions
+                      if x.partition_name == part_name), None)
+    if partition is None:
+      partition = self.manifest.partitions.add()
+      partition.partition_name = part_name
 
+    operations = partition.operations
     op = operations.add()
     op.type = op_type
 
@@ -277,7 +283,7 @@
     self.data_blobs.append(data_blob)
     return data_length, data_offset
 
-  def AddOperationWithData(self, is_kernel, op_type, src_extents=None,
+  def AddOperationWithData(self, part_name, op_type, src_extents=None,
                            src_length=None, dst_extents=None, dst_length=None,
                            data_blob=None, do_hash_data_blob=True):
     """Adds an install operation and associated data blob.
@@ -287,7 +293,7 @@
     necessary offset/length accounting.
 
     Args:
-      is_kernel: whether this is a kernel (True) or rootfs (False) operation
+      part_name: The name of the partition (e.g. kernel or root).
       op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ.
       src_extents: list of (start, length) pairs indicating src block ranges
       src_length: size of the src data in bytes (needed for diff operations)
@@ -302,15 +308,13 @@
         data_sha256_hash = hashlib.sha256(data_blob).digest()
       data_length, data_offset = self.AddData(data_blob)
 
-    self.AddOperation(is_kernel, op_type, data_offset=data_offset,
+    self.AddOperation(part_name, op_type, data_offset=data_offset,
                       data_length=data_length, src_extents=src_extents,
                       src_length=src_length, dst_extents=dst_extents,
                       dst_length=dst_length, data_sha256_hash=data_sha256_hash)
 
   def WriteToFileWithData(self, file_obj, sigs_data=None,
-                          privkey_file_name=None,
-                          do_add_pseudo_operation=False,
-                          is_pseudo_in_kernel=False, padding=None):
+                          privkey_file_name=None, padding=None):
     """Writes the payload content to a file, optionally signing the content.
 
     Args:
@@ -319,10 +323,6 @@
                  payload signature fields assumed to be preset by the caller)
       privkey_file_name: key used for signing the payload (optional; used only
                          if explicit signatures blob not provided)
-      do_add_pseudo_operation: whether a pseudo-operation should be added to
-                               account for the signature blob
-      is_pseudo_in_kernel: whether the pseudo-operation should be added to
-                           kernel (True) or rootfs (False) operations
       padding: stuff to dump past the normal data blobs provided (optional)
 
     Raises:
@@ -343,17 +343,6 @@
       # Update the payload with proper signature attributes.
       self.SetSignatures(self.curr_offset, sigs_len)
 
-    # Add a pseudo-operation to account for the signature blob, if requested.
-    if do_add_pseudo_operation:
-      if not self.block_size:
-        raise TestError('cannot add pseudo-operation without knowing the '
-                        'payload block size')
-      self.AddOperation(
-          is_pseudo_in_kernel, common.OpType.REPLACE,
-          data_offset=self.curr_offset, data_length=sigs_len,
-          dst_extents=[(common.PSEUDO_EXTENT_MARKER,
-                        (sigs_len + self.block_size - 1) / self.block_size)])
-
     if do_generate_sigs_data:
       # Once all payload fields are updated, dump and sign it.
       temp_payload_file = cStringIO.StringIO()
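
For reference, a hypothetical end-to-end use of the part_name-based helpers, mirroring how the checker unittests above drive them; the EnhancedPayloadGenerator class name is an assumption (it is not part of this hunk), as is writing the signed result straight to a local file:

  import hashlib
  import os

  from update_payload import common
  from update_payload import test_utils

  block_size = test_utils.KiB(4)
  payload_gen = test_utils.EnhancedPayloadGenerator()  # class name assumed
  payload_gen.SetBlockSize(block_size)
  payload_gen.SetPartInfo(common.ROOTFS, True, test_utils.MiB(2),
                          hashlib.sha256('fake-new-rootfs-content').digest())
  # One REPLACE operation whose destination extent matches the blob length.
  payload_gen.AddOperationWithData(
      common.ROOTFS, common.OpType.REPLACE,
      dst_extents=[(0, 2)],
      data_blob=os.urandom(2 * block_size))

  with open('full_payload.bin', 'wb') as payload_file:
    payload_gen.WriteToFileWithData(
        payload_file, privkey_file_name=test_utils._PRIVKEY_FILE_NAME)
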
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index 6275642..907cc18 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -20,7 +20,7 @@
   package='chromeos_update_engine',
   syntax='proto2',
   serialized_options=_b('H\003'),
-  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd0\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x8f\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
+  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xc9\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
 )
 
 
@@ -40,38 +40,46 @@
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='SOURCE_COPY', index=2, number=4,
+      name='MOVE', index=2, number=2,
+      serialized_options=_b('\010\001'),
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='BSDIFF', index=3, number=3,
+      serialized_options=_b('\010\001'),
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SOURCE_COPY', index=4, number=4,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='SOURCE_BSDIFF', index=3, number=5,
+      name='SOURCE_BSDIFF', index=5, number=5,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='REPLACE_XZ', index=4, number=8,
+      name='REPLACE_XZ', index=6, number=8,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='ZERO', index=5, number=6,
+      name='ZERO', index=7, number=6,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='DISCARD', index=6, number=7,
+      name='DISCARD', index=8, number=7,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='BROTLI_BSDIFF', index=7, number=10,
+      name='BROTLI_BSDIFF', index=9, number=10,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='PUFFDIFF', index=8, number=9,
+      name='PUFFDIFF', index=10, number=9,
       serialized_options=None,
       type=None),
   ],
   containing_type=None,
   serialized_options=None,
   serialized_start=712,
-  serialized_end=855,
+  serialized_end=885,
 )
 _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
 
@@ -370,7 +378,7 @@
   oneofs=[
   ],
   serialized_start=391,
-  serialized_end=855,
+  serialized_end=885,
 )
 
 
@@ -505,8 +513,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=858,
-  serialized_end=1585,
+  serialized_start=888,
+  serialized_end=1615,
 )
 
 
@@ -550,8 +558,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1587,
-  serialized_end=1663,
+  serialized_start=1617,
+  serialized_end=1693,
 )
 
 
@@ -581,8 +589,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1665,
-  serialized_end=1754,
+  serialized_start=1695,
+  serialized_end=1784,
 )
 
 
@@ -599,14 +607,14 @@
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
       number=2, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
       number=3, type=13, cpp_type=3, label=1,
@@ -634,28 +642,28 @@
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
       number=7, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
       number=8, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
       number=9, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
       number=10, type=11, cpp_type=10, label=1,
@@ -710,8 +718,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1757,
-  serialized_end=2574,
+  serialized_start=1787,
+  serialized_end=2628,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
@@ -823,4 +831,12 @@
 
 
 DESCRIPTOR._options = None
+_INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
+_INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info']._options = None
 # @@protoc_insertion_point(module_scope)
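
A small sanity check one could run against the regenerated bindings above, assuming the module is importable from update_payload; FieldDescriptor.GetOptions() is standard protobuf Python API and should report the [deprecated = true] option now encoded on each legacy major-version-1 field:

  from update_payload import update_metadata_pb2

  manifest_desc = update_metadata_pb2.DeltaArchiveManifest.DESCRIPTOR
  for name in ('install_operations', 'kernel_install_operations',
               'old_kernel_info', 'new_kernel_info',
               'old_rootfs_info', 'new_rootfs_info'):
    # Expected to print True for every legacy field after this change.
    print('%s deprecated: %s' % (
        name, manifest_desc.fields_by_name[name].GetOptions().deprecated))
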
diff --git a/update_metadata.proto b/update_metadata.proto
index 3382f84..40db678 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -303,8 +303,8 @@
   // Only present in major version = 1. List of install operations for the
   // kernel and rootfs partitions. For major version = 2 see the |partitions|
   // field.
-  repeated InstallOperation install_operations = 1;
-  repeated InstallOperation kernel_install_operations = 2;
+  repeated InstallOperation install_operations = 1 [deprecated = true];
+  repeated InstallOperation kernel_install_operations = 2 [deprecated = true];
 
   // (At time of writing) usually 4096
   optional uint32 block_size = 3 [default = 4096];
@@ -319,10 +319,10 @@
 
   // Only present in major version = 1. Partition metadata used to validate the
   // update. For major version = 2 see the |partitions| field.
-  optional PartitionInfo old_kernel_info = 6;
-  optional PartitionInfo new_kernel_info = 7;
-  optional PartitionInfo old_rootfs_info = 8;
-  optional PartitionInfo new_rootfs_info = 9;
+  optional PartitionInfo old_kernel_info = 6 [deprecated = true];
+  optional PartitionInfo new_kernel_info = 7 [deprecated = true];
+  optional PartitionInfo old_rootfs_info = 8 [deprecated = true];
+  optional PartitionInfo new_rootfs_info = 9 [deprecated = true];
 
   // old_image_info will only be present for delta images.
   optional ImageInfo old_image_info = 10;