update_engine: Deprecate major version 1

We have moved away from major version 1 in Chrome OS and already have a
stepping stone for it in M53. So this cleanup makes the code much easier
to understand.

BUG=chromium:1008553
TEST=FEATURES="test" sudo emerge update_engine update_payload
TEST=cros_generate_update_payload --image chromiumos_test_image.bin --check --output delta.bin

Change-Id: I01815dfa5fdf395f8214ef162e01ecca2d42f7fc
Reviewed-on: https://chromium-review.googlesource.com/c/aosp/platform/system/update_engine/+/1857459
Tested-by: Amin Hassani <[email protected]>
Reviewed-by: Sen Jiang <[email protected]>
Commit-Queue: Amin Hassani <[email protected]>
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index 3f64444..511ed49 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -29,7 +29,6 @@
 import array
 import bz2
 import hashlib
-import itertools
 # Not everywhere we can have the lzma library so we ignore it if we didn't have
 # it because it is not going to be used. For example, 'cros flash' uses
 # devserver code which eventually loads this file, but the lzma library is not
@@ -45,7 +44,6 @@
   except ImportError:
     pass
 import os
-import shutil
 import subprocess
 import sys
 import tempfile
@@ -116,12 +114,8 @@
       break
     read_length = min(max_length, ex.num_blocks * block_size)
 
-    # Fill with zeros or read from file, depending on the type of extent.
-    if ex.start_block == common.PSEUDO_EXTENT_MARKER:
-      data.extend(itertools.repeat('\0', read_length))
-    else:
-      file_obj.seek(ex.start_block * block_size)
-      data.fromfile(file_obj, read_length)
+    file_obj.seek(ex.start_block * block_size)
+    data.fromfile(file_obj, read_length)
 
     max_length -= read_length
 
@@ -150,11 +144,9 @@
       raise PayloadError('%s: more write extents than data' % ex_name)
     write_length = min(data_length, ex.num_blocks * block_size)
 
-    # Only do actual writing if this is not a pseudo-extent.
-    if ex.start_block != common.PSEUDO_EXTENT_MARKER:
-      file_obj.seek(ex.start_block * block_size)
-      data_view = buffer(data, data_offset, write_length)
-      file_obj.write(data_view)
+    file_obj.seek(ex.start_block * block_size)
+    data_view = buffer(data, data_offset, write_length)
+    file_obj.write(data_view)
 
     data_offset += write_length
     data_length -= write_length
@@ -189,15 +181,12 @@
     if not data_length:
       raise PayloadError('%s: more extents than total data length' % ex_name)
 
-    is_pseudo = ex.start_block == common.PSEUDO_EXTENT_MARKER
-    start_byte = -1 if is_pseudo else ex.start_block * block_size
+    start_byte = ex.start_block * block_size
     num_bytes = ex.num_blocks * block_size
     if data_length < num_bytes:
       # We're only padding a real extent.
-      if not is_pseudo:
-        pad_off = start_byte + data_length
-        pad_len = num_bytes - data_length
-
+      pad_off = start_byte + data_length
+      pad_len = num_bytes - data_length
       num_bytes = data_length
 
     arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes)
@@ -274,30 +263,28 @@
       num_blocks = ex.num_blocks
       count = num_blocks * block_size
 
-      # Make sure it's not a fake (signature) operation.
-      if start_block != common.PSEUDO_EXTENT_MARKER:
-        data_end = data_start + count
+      data_end = data_start + count
 
-        # Make sure we're not running past partition boundary.
-        if (start_block + num_blocks) * block_size > part_size:
-          raise PayloadError(
-              '%s: extent (%s) exceeds partition size (%d)' %
-              (ex_name, common.FormatExtent(ex, block_size),
-               part_size))
+      # Make sure we're not running past partition boundary.
+      if (start_block + num_blocks) * block_size > part_size:
+        raise PayloadError(
+            '%s: extent (%s) exceeds partition size (%d)' %
+            (ex_name, common.FormatExtent(ex, block_size),
+             part_size))
 
-        # Make sure that we have enough data to write.
-        if data_end >= data_length + block_size:
-          raise PayloadError(
-              '%s: more dst blocks than data (even with padding)')
+      # Make sure that we have enough data to write.
+      if data_end >= data_length + block_size:
+        raise PayloadError(
+            '%s: more dst blocks than data (even with padding)')
 
-        # Pad with zeros if necessary.
-        if data_end > data_length:
-          padding = data_end - data_length
-          out_data += '\0' * padding
+      # Pad with zeros if necessary.
+      if data_end > data_length:
+        padding = data_end - data_length
+        out_data += '\0' * padding
 
-        self.payload.payload_file.seek(start_block * block_size)
-        part_file.seek(start_block * block_size)
-        part_file.write(out_data[data_start:data_end])
+      self.payload.payload_file.seek(start_block * block_size)
+      part_file.seek(start_block * block_size)
+      part_file.write(out_data[data_start:data_end])
 
       data_start += count
 
@@ -323,10 +310,8 @@
     # Iterate over the extents and write zero.
     # pylint: disable=unused-variable
     for ex, ex_name in common.ExtentIter(op.dst_extents, base_name):
-      # Only do actual writing if this is not a pseudo-extent.
-      if ex.start_block != common.PSEUDO_EXTENT_MARKER:
-        part_file.seek(ex.start_block * block_size)
-        part_file.write('\0' * (ex.num_blocks * block_size))
+      part_file.seek(ex.start_block * block_size)
+      part_file.write('\0' * (ex.num_blocks * block_size))
 
   def _ApplySourceCopyOperation(self, op, op_name, old_part_file,
                                 new_part_file):
@@ -597,20 +582,11 @@
     install_operations = []
 
     manifest = self.payload.manifest
-    if self.payload.header.version == 1:
-      for real_name, proto_name in common.CROS_PARTITIONS:
-        new_part_info[real_name] = getattr(manifest, 'new_%s_info' % proto_name)
-        old_part_info[real_name] = getattr(manifest, 'old_%s_info' % proto_name)
-
-      install_operations.append((common.ROOTFS, manifest.install_operations))
-      install_operations.append((common.KERNEL,
-                                 manifest.kernel_install_operations))
-    else:
-      for part in manifest.partitions:
-        name = part.partition_name
-        new_part_info[name] = part.new_partition_info
-        old_part_info[name] = part.old_partition_info
-        install_operations.append((name, part.operations))
+    for part in manifest.partitions:
+      name = part.partition_name
+      new_part_info[name] = part.new_partition_info
+      old_part_info[name] = part.old_partition_info
+      install_operations.append((name, part.operations))
 
     part_names = set(new_part_info.keys())  # Equivalently, old_part_info.keys()
 
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 674d9f4..4558872 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -45,11 +45,9 @@
 # Constants.
 #
 
-_CHECK_DST_PSEUDO_EXTENTS = 'dst-pseudo-extents'
 _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
 _CHECK_PAYLOAD_SIG = 'payload-sig'
 CHECKS_TO_DISABLE = (
-    _CHECK_DST_PSEUDO_EXTENTS,
     _CHECK_MOVE_SAME_SRC_DST_BLOCK,
     _CHECK_PAYLOAD_SIG,
 )
@@ -320,8 +318,6 @@
     self.allow_unhashed = allow_unhashed
 
     # Disable specific tests.
-    self.check_dst_pseudo_extents = (
-        _CHECK_DST_PSEUDO_EXTENTS not in disabled_tests)
     self.check_move_same_src_dst_block = (
         _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests)
     self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests
@@ -625,35 +621,23 @@
     self._CheckPresentIff(self.sigs_offset, self.sigs_size,
                           'signatures_offset', 'signatures_size', 'manifest')
 
-    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-      for real_name, proto_name in common.CROS_PARTITIONS:
-        self.old_part_info[real_name] = self._CheckOptionalSubMsg(
-            manifest, 'old_%s_info' % proto_name, report)
-        self.new_part_info[real_name] = self._CheckMandatorySubMsg(
-            manifest, 'new_%s_info' % proto_name, report, 'manifest')
+    for part in manifest.partitions:
+      name = part.partition_name
+      self.old_part_info[name] = self._CheckOptionalSubMsg(
+          part, 'old_partition_info', report)
+      self.new_part_info[name] = self._CheckMandatorySubMsg(
+          part, 'new_partition_info', report, 'manifest.partitions')
 
-      # Check: old_kernel_info <==> old_rootfs_info.
-      self._CheckPresentIff(self.old_part_info[common.KERNEL].msg,
-                            self.old_part_info[common.ROOTFS].msg,
-                            'old_kernel_info', 'old_rootfs_info', 'manifest')
-    else:
-      for part in manifest.partitions:
-        name = part.partition_name
-        self.old_part_info[name] = self._CheckOptionalSubMsg(
-            part, 'old_partition_info', report)
-        self.new_part_info[name] = self._CheckMandatorySubMsg(
-            part, 'new_partition_info', report, 'manifest.partitions')
+    # Check: Old-style partition infos should not be specified.
+    for _, part in common.CROS_PARTITIONS:
+      self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
+      self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
 
-      # Check: Old-style partition infos should not be specified.
-      for _, part in common.CROS_PARTITIONS:
-        self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest')
-        self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest')
-
-      # Check: If old_partition_info is specified anywhere, it must be
-      # specified everywhere.
-      old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
-      self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
-                                'manifest.partitions')
+    # Check: If old_partition_info is specified anywhere, it must be
+    # specified everywhere.
+    old_part_msgs = [part.msg for part in self.old_part_info.values() if part]
+    self._CheckPresentIffMany(old_part_msgs, 'old_partition_info',
+                              'manifest.partitions')
 
     is_delta = any(part and part.msg for part in self.old_part_info.values())
     if is_delta:
@@ -721,8 +705,7 @@
     self._CheckBlocksFitLength(length, total_blocks, self.block_size,
                                '%s: %s' % (op_name, length_name))
 
-  def _CheckExtents(self, extents, usable_size, block_counters, name,
-                    allow_pseudo=False, allow_signature=False):
+  def _CheckExtents(self, extents, usable_size, block_counters, name):
     """Checks a sequence of extents.
 
     Args:
@@ -730,8 +713,6 @@
       usable_size: The usable size of the partition to which the extents apply.
       block_counters: Array of counters corresponding to the number of blocks.
       name: The name of the extent block.
-      allow_pseudo: Whether or not pseudo block numbers are allowed.
-      allow_signature: Whether or not the extents are used for a signature.
 
     Returns:
       The total number of blocks in the extents.
@@ -752,20 +733,15 @@
       if num_blocks == 0:
         raise error.PayloadError('%s: extent length is zero.' % ex_name)
 
-      if start_block != common.PSEUDO_EXTENT_MARKER:
-        # Check: Make sure we're within the partition limit.
-        if usable_size and end_block * self.block_size > usable_size:
-          raise error.PayloadError(
-              '%s: extent (%s) exceeds usable partition size (%d).' %
-              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
+      # Check: Make sure we're within the partition limit.
+      if usable_size and end_block * self.block_size > usable_size:
+        raise error.PayloadError(
+            '%s: extent (%s) exceeds usable partition size (%d).' %
+            (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
 
-        # Record block usage.
-        for i in xrange(start_block, end_block):
-          block_counters[i] += 1
-      elif not (allow_pseudo or (allow_signature and len(extents) == 1)):
-        # Pseudo-extents must be allowed explicitly, or otherwise be part of a
-        # signature operation (in which case there has to be exactly one).
-        raise error.PayloadError('%s: unexpected pseudo-extent.' % ex_name)
+      # Record block usage.
+      for i in xrange(start_block, end_block):
+        block_counters[i] += 1
 
       total_num_blocks += num_blocks
 
@@ -896,21 +872,19 @@
     if self.minor_version >= 3 and op.src_sha256_hash is None:
       raise error.PayloadError('%s: source hash missing.' % op_name)
 
-  def _CheckOperation(self, op, op_name, is_last, old_block_counters,
-                      new_block_counters, old_usable_size, new_usable_size,
-                      prev_data_offset, allow_signature, blob_hash_counts):
+  def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters,
+                      old_usable_size, new_usable_size, prev_data_offset,
+                      blob_hash_counts):
     """Checks a single update operation.
 
     Args:
       op: The operation object.
       op_name: Operation name string for error reporting.
-      is_last: Whether this is the last operation in the sequence.
       old_block_counters: Arrays of block read counters.
       new_block_counters: Arrays of block write counters.
       old_usable_size: The overall usable size for src data in bytes.
       new_usable_size: The overall usable size for dst data in bytes.
       prev_data_offset: Offset of last used data bytes.
-      allow_signature: Whether this may be a signature operation.
       blob_hash_counts: Counters for hashed/unhashed blobs.
 
     Returns:
@@ -922,14 +896,10 @@
     # Check extents.
     total_src_blocks = self._CheckExtents(
         op.src_extents, old_usable_size, old_block_counters,
-        op_name + '.src_extents', allow_pseudo=True)
-    allow_signature_in_extents = (allow_signature and is_last and
-                                  op.type == common.OpType.REPLACE)
+        op_name + '.src_extents')
     total_dst_blocks = self._CheckExtents(
         op.dst_extents, new_usable_size, new_block_counters,
-        op_name + '.dst_extents',
-        allow_pseudo=(not self.check_dst_pseudo_extents),
-        allow_signature=allow_signature_in_extents)
+        op_name + '.dst_extents')
 
     # Check: data_offset present <==> data_length present.
     data_offset = self._CheckOptionalField(op, 'data_offset', None)
@@ -965,9 +935,7 @@
             (op_name, common.FormatSha256(op.data_sha256_hash),
              common.FormatSha256(actual_hash.digest())))
     elif data_offset is not None:
-      if allow_signature_in_extents:
-        blob_hash_counts['signature'] += 1
-      elif self.allow_unhashed:
+      if self.allow_unhashed:
         blob_hash_counts['unhashed'] += 1
       else:
         raise error.PayloadError('%s: unhashed operation not allowed.' %
@@ -981,11 +949,8 @@
             (op_name, data_offset, prev_data_offset))
 
     # Type-specific checks.
-    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
-      self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
-    elif (op.type == common.OpType.REPLACE_XZ and
-          (self.minor_version >= 3 or
-           self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION)):
+    if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ,
+                   common.OpType.REPLACE_XZ):
       self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name)
     elif op.type == common.OpType.ZERO and self.minor_version >= 4:
       self._CheckZeroOperation(op, op_name)
@@ -1030,7 +995,7 @@
 
   def _CheckOperations(self, operations, report, base_name, old_fs_size,
                        new_fs_size, old_usable_size, new_usable_size,
-                       prev_data_offset, allow_signature):
+                       prev_data_offset):
     """Checks a sequence of update operations.
 
     Args:
@@ -1042,7 +1007,6 @@
       old_usable_size: The overall usable size of the old partition in bytes.
       new_usable_size: The overall usable size of the new partition in bytes.
       prev_data_offset: Offset of last used data bytes.
-      allow_signature: Whether this sequence may contain signature operations.
 
     Returns:
       The total data blob size used.
@@ -1078,8 +1042,6 @@
         'hashed': 0,
         'unhashed': 0,
     }
-    if allow_signature:
-      blob_hash_counts['signature'] = 0
 
     # Allocate old and new block counters.
     old_block_counters = (self._AllocBlockCounters(old_usable_size)
@@ -1096,12 +1058,10 @@
         raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type))
       op_counts[op.type] += 1
 
-      is_last = op_num == len(operations)
       curr_data_used = self._CheckOperation(
-          op, op_name, is_last, old_block_counters, new_block_counters,
+          op, op_name, old_block_counters, new_block_counters,
           old_usable_size, new_usable_size,
-          prev_data_offset + total_data_used, allow_signature,
-          blob_hash_counts)
+          prev_data_offset + total_data_used, blob_hash_counts)
       if curr_data_used:
         op_blob_totals[op.type] += curr_data_used
         total_data_used += curr_data_used
@@ -1155,21 +1115,18 @@
     if not sigs.signatures:
       raise error.PayloadError('Signature block is empty.')
 
-    last_ops_section = (self.payload.manifest.kernel_install_operations or
-                        self.payload.manifest.install_operations)
-
-    # Only major version 1 has the fake signature OP at the end.
-    if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-      fake_sig_op = last_ops_section[-1]
+    # Check that we don't have the signature operation blob at the end (used to
+    # be for major version 1).
+    last_partition = self.payload.manifest.partitions[-1]
+    if last_partition.operations:
+      last_op = last_partition.operations[-1]
       # Check: signatures_{offset,size} must match the last (fake) operation.
-      if not (fake_sig_op.type == common.OpType.REPLACE and
-              self.sigs_offset == fake_sig_op.data_offset and
-              self.sigs_size == fake_sig_op.data_length):
-        raise error.PayloadError('Signatures_{offset,size} (%d+%d) does not'
-                                 ' match last operation (%d+%d).' %
-                                 (self.sigs_offset, self.sigs_size,
-                                  fake_sig_op.data_offset,
-                                  fake_sig_op.data_length))
+      if (last_op.type == common.OpType.REPLACE and
+          last_op.data_offset == self.sigs_offset and
+          last_op.data_length == self.sigs_size):
+        raise error.PayloadError('It seems like the last operation is the '
+                                 'signature blob. This is an invalid payload.')
+
 
     # Compute the checksum of all data up to signature blob.
     # TODO(garnold) we're re-reading the whole data section into a string
@@ -1248,29 +1205,17 @@
       self._CheckManifest(report, part_sizes)
       assert self.payload_type, 'payload type should be known by now'
 
-      manifest = self.payload.manifest
-
-      # Part 3: Examine partition operations.
-      install_operations = []
-      if self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION:
-        # partitions field should not ever exist in major version 1 payloads
-        self._CheckRepeatedElemNotPresent(manifest, 'partitions', 'manifest')
-
-        install_operations.append((common.ROOTFS, manifest.install_operations))
-        install_operations.append((common.KERNEL,
-                                   manifest.kernel_install_operations))
-
-      else:
-        self._CheckRepeatedElemNotPresent(manifest, 'install_operations',
+      # Make sure deprecated values are not present in the payload.
+      for field in ('install_operations', 'kernel_install_operations'):
+        self._CheckRepeatedElemNotPresent(self.payload.manifest, field,
                                           'manifest')
-        self._CheckRepeatedElemNotPresent(manifest, 'kernel_install_operations',
-                                          'manifest')
-
-        for update in manifest.partitions:
-          install_operations.append((update.partition_name, update.operations))
+      for field in ('old_kernel_info', 'old_rootfs_info',
+                    'new_kernel_info', 'new_rootfs_info'):
+        self._CheckElemNotPresent(self.payload.manifest, field, 'manifest')
 
       total_blob_size = 0
-      for part, operations in install_operations:
+      for part, operations in ((p.partition_name, p.operations)
+                               for p in self.payload.manifest.partitions):
         report.AddSection('%s operations' % part)
 
         new_fs_usable_size = self.new_fs_sizes[part]
@@ -1285,16 +1230,13 @@
         total_blob_size += self._CheckOperations(
             operations, report, '%s_install_operations' % part,
             self.old_fs_sizes[part], self.new_fs_sizes[part],
-            old_fs_usable_size, new_fs_usable_size, total_blob_size,
-            (self.major_version == common.CHROMEOS_MAJOR_PAYLOAD_VERSION
-             and part == common.KERNEL))
+            old_fs_usable_size, new_fs_usable_size, total_blob_size)
 
       # Check: Operations data reach the end of the payload file.
       used_payload_size = self.payload.data_offset + total_blob_size
       # Major versions 2 and higher have a signature at the end, so it should be
       # considered in the total size of the image.
-      if (self.major_version >= common.BRILLO_MAJOR_PAYLOAD_VERSION and
-          self.sigs_size):
+      if self.sigs_size:
         used_payload_size += self.sigs_size
 
       if used_payload_size != payload_file_size:
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index b5f2f3e..4881653 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -427,10 +427,10 @@
       payload_gen.SetBlockSize(test_utils.KiB(4))
 
     # Add some operations.
-    payload_gen.AddOperation(False, common.OpType.SOURCE_COPY,
+    payload_gen.AddOperation(common.ROOTFS, common.OpType.SOURCE_COPY,
                              src_extents=[(0, 16), (16, 497)],
                              dst_extents=[(16, 496), (0, 16)])
-    payload_gen.AddOperation(True, common.OpType.SOURCE_COPY,
+    payload_gen.AddOperation(common.KERNEL, common.OpType.SOURCE_COPY,
                              src_extents=[(0, 8), (8, 8)],
                              dst_extents=[(8, 8), (0, 8)])
 
@@ -456,19 +456,21 @@
     if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:
       oki_hash = (None if fail_bad_oki
                   else hashlib.sha256('fake-oki-content').digest())
-      payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)
+      payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size,
+                              oki_hash)
     if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or
                                         fail_bad_ori):
       ori_hash = (None if fail_bad_ori
                   else hashlib.sha256('fake-ori-content').digest())
-      payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)
+      payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size,
+                              ori_hash)
 
     # Add new kernel/rootfs partition info.
     payload_gen.SetPartInfo(
-        True, True, new_kernel_fs_size,
+        common.KERNEL, True, new_kernel_fs_size,
         None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())
     payload_gen.SetPartInfo(
-        False, True, new_rootfs_fs_size,
+        common.ROOTFS, True, new_rootfs_fs_size,
         None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())
 
     # Set the minor version.
@@ -521,23 +523,6 @@
         payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
                                       collections.defaultdict(int), 'foo'))
 
-    # Passes w/ pseudo-extents (aka sparse holes).
-    extents = self.NewExtentList((0, 4), (common.PSEUDO_EXTENT_MARKER, 5),
-                                 (8, 3))
-    self.assertEquals(
-        12,
-        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
-                                      collections.defaultdict(int), 'foo',
-                                      allow_pseudo=True))
-
-    # Passes w/ pseudo-extent due to a signature.
-    extents = self.NewExtentList((common.PSEUDO_EXTENT_MARKER, 2))
-    self.assertEquals(
-        2,
-        payload_checker._CheckExtents(extents, (1024 + 16) * block_size,
-                                      collections.defaultdict(int), 'foo',
-                                      allow_signature=True))
-
     # Fails, extent missing a start block.
     extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16))
     self.assertRaises(
@@ -704,8 +689,8 @@
     self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation,
                       None, 0, 1, 'foo')
 
-  def DoCheckOperationTest(self, op_type_name, is_last, allow_signature,
-                           allow_unhashed, fail_src_extents, fail_dst_extents,
+  def DoCheckOperationTest(self, op_type_name, allow_unhashed,
+                           fail_src_extents, fail_dst_extents,
                            fail_mismatched_data_offset_length,
                            fail_missing_dst_extents, fail_src_length,
                            fail_dst_length, fail_data_hash,
@@ -715,8 +700,6 @@
     Args:
       op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
         'SOURCE_COPY', 'SOURCE_BSDIFF', BROTLI_BSDIFF or 'PUFFDIFF'.
-      is_last: Whether we're testing the last operation in a sequence.
-      allow_signature: Whether we're testing a signature-capable operation.
       allow_unhashed: Whether we're allowing to not hash the data.
       fail_src_extents: Tamper with src extents.
       fail_dst_extents: Tamper with dst extents.
@@ -762,8 +745,7 @@
                           self.NewExtentList((1, 16)))
         total_src_blocks = 16
 
-    # TODO(tbrindus): add major version 2 tests.
-    payload_checker.major_version = common.CHROMEOS_MAJOR_PAYLOAD_VERSION
+    payload_checker.major_version = common.BRILLO_MAJOR_PAYLOAD_VERSION
     if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ):
       payload_checker.minor_version = 0
     elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF):
@@ -785,13 +767,11 @@
         op.data_offset = prev_data_offset
 
       fake_data = 'fake-data'.ljust(op.data_length)
-      if not (allow_unhashed or (is_last and allow_signature and
-                                 op_type == common.OpType.REPLACE)):
-        if not fail_data_hash:
-          # Create a valid data blob hash.
-          op.data_sha256_hash = hashlib.sha256(fake_data).digest()
-          payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
-              fake_data)
+      if not allow_unhashed and not fail_data_hash:
+        # Create a valid data blob hash.
+        op.data_sha256_hash = hashlib.sha256(fake_data).digest()
+        payload.ReadDataBlob(op.data_offset, op.data_length).AndReturn(
+            fake_data)
 
       elif fail_data_hash:
         # Create an invalid data blob hash.
@@ -833,8 +813,8 @@
                    fail_missing_dst_extents or fail_src_length or
                    fail_dst_length or fail_data_hash or fail_prev_data_offset or
                    fail_bad_minor_version)
-    args = (op, 'foo', is_last, old_block_counters, new_block_counters,
-            old_part_size, new_part_size, prev_data_offset, allow_signature,
+    args = (op, 'foo', old_block_counters, new_block_counters,
+            old_part_size, new_part_size, prev_data_offset,
             blob_hash_counts)
     if should_fail:
       self.assertRaises(PayloadError, payload_checker._CheckOperation, *args)
@@ -876,7 +856,7 @@
     if fail_nonexhaustive_full_update:
       rootfs_data_length -= block_size
 
-    payload_gen.AddOperation(False, rootfs_op_type,
+    payload_gen.AddOperation(common.ROOTFS, rootfs_op_type,
                              dst_extents=[(0, rootfs_data_length / block_size)],
                              data_offset=0,
                              data_length=rootfs_data_length)
@@ -887,17 +867,17 @@
                                              'allow_unhashed': True})
     payload_checker.payload_type = checker._TYPE_FULL
     report = checker._PayloadReport()
-
-    args = (payload_checker.payload.manifest.install_operations, report, 'foo',
-            0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0, False)
+    partition = next((p for p in payload_checker.payload.manifest.partitions
+                      if p.partition_name == common.ROOTFS), None)
+    args = (partition.operations, report, 'foo',
+            0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0)
     if fail_nonexhaustive_full_update:
       self.assertRaises(PayloadError, payload_checker._CheckOperations, *args)
     else:
       self.assertEqual(rootfs_data_length,
                        payload_checker._CheckOperations(*args))
 
-  def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_missing_pseudo_op,
-                            fail_mismatched_pseudo_op, fail_sig_missing_fields,
+  def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields,
                             fail_unknown_sig_version, fail_incorrect_sig):
     """Tests _CheckSignatures()."""
     # Generate a test payload. For this test, we only care about the signature
@@ -908,20 +888,18 @@
     payload_gen.SetBlockSize(block_size)
     rootfs_part_size = test_utils.MiB(2)
     kernel_part_size = test_utils.KiB(16)
-    payload_gen.SetPartInfo(False, True, rootfs_part_size,
+    payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size,
                             hashlib.sha256('fake-new-rootfs-content').digest())
-    payload_gen.SetPartInfo(True, True, kernel_part_size,
+    payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size,
                             hashlib.sha256('fake-new-kernel-content').digest())
     payload_gen.SetMinorVersion(0)
     payload_gen.AddOperationWithData(
-        False, common.OpType.REPLACE,
+        common.ROOTFS, common.OpType.REPLACE,
         dst_extents=[(0, rootfs_part_size / block_size)],
         data_blob=os.urandom(rootfs_part_size))
 
-    do_forge_pseudo_op = (fail_missing_pseudo_op or fail_mismatched_pseudo_op)
-    do_forge_sigs_data = (do_forge_pseudo_op or fail_empty_sigs_blob or
-                          fail_sig_missing_fields or fail_unknown_sig_version
-                          or fail_incorrect_sig)
+    do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or
+                          fail_unknown_sig_version or fail_incorrect_sig)
 
     sigs_data = None
     if do_forge_sigs_data:
@@ -937,22 +915,12 @@
       sigs_data = sigs_gen.ToBinary()
       payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data))
 
-    if do_forge_pseudo_op:
-      assert sigs_data is not None, 'should have forged signatures blob by now'
-      sigs_len = len(sigs_data)
-      payload_gen.AddOperation(
-          False, common.OpType.REPLACE,
-          data_offset=payload_gen.curr_offset / 2,
-          data_length=sigs_len / 2,
-          dst_extents=[(0, (sigs_len / 2 + block_size - 1) / block_size)])
-
     # Generate payload (complete w/ signature) and create the test object.
     payload_checker = _GetPayloadChecker(
         payload_gen.WriteToFileWithData,
         payload_gen_dargs={
             'sigs_data': sigs_data,
-            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
-            'do_add_pseudo_operation': not do_forge_pseudo_op})
+            'privkey_file_name': test_utils._PRIVKEY_FILE_NAME})
     payload_checker.payload_type = checker._TYPE_FULL
     report = checker._PayloadReport()
 
@@ -962,8 +930,7 @@
         common.KERNEL: kernel_part_size
     })
 
-    should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
-                   fail_mismatched_pseudo_op or fail_sig_missing_fields or
+    should_fail = (fail_empty_sigs_blob or fail_sig_missing_fields or
                    fail_unknown_sig_version or fail_incorrect_sig)
     args = (report, test_utils._PUBKEY_FILE_NAME)
     if should_fail:
@@ -1016,9 +983,9 @@
     payload_gen.SetBlockSize(block_size)
     kernel_filesystem_size = test_utils.KiB(16)
     rootfs_filesystem_size = test_utils.MiB(2)
-    payload_gen.SetPartInfo(False, True, rootfs_filesystem_size,
+    payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size,
                             hashlib.sha256('fake-new-rootfs-content').digest())
-    payload_gen.SetPartInfo(True, True, kernel_filesystem_size,
+    payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size,
                             hashlib.sha256('fake-new-kernel-content').digest())
     payload_gen.SetMinorVersion(0)
 
@@ -1029,7 +996,7 @@
     if fail_rootfs_part_size_exceeded:
       rootfs_op_size += block_size
     payload_gen.AddOperationWithData(
-        False, common.OpType.REPLACE,
+        common.ROOTFS, common.OpType.REPLACE,
         dst_extents=[(0, rootfs_op_size / block_size)],
         data_blob=os.urandom(rootfs_op_size))
 
@@ -1040,7 +1007,7 @@
     if fail_kernel_part_size_exceeded:
       kernel_op_size += block_size
     payload_gen.AddOperationWithData(
-        True, common.OpType.REPLACE,
+        common.KERNEL, common.OpType.REPLACE,
         dst_extents=[(0, kernel_op_size / block_size)],
         data_blob=os.urandom(kernel_op_size))
 
@@ -1052,16 +1019,14 @@
     else:
       use_block_size = block_size
 
-    # For the unittests 246 is the value that generated for the payload.
-    metadata_size = 246
+    # For the unittests, 237 is the value generated for the payload.
+    metadata_size = 237
     if fail_mismatched_metadata_size:
       metadata_size += 1
 
     kwargs = {
         'payload_gen_dargs': {
             'privkey_file_name': test_utils._PRIVKEY_FILE_NAME,
-            'do_add_pseudo_operation': True,
-            'is_pseudo_in_kernel': True,
             'padding': os.urandom(1024) if fail_excess_data else None},
         'checker_init_dargs': {
             'assert_type': 'delta' if fail_wrong_payload_type else 'full',
@@ -1073,7 +1038,7 @@
       payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData,
                                            **kwargs)
 
-      kwargs = {
+      kwargs2 = {
           'pubkey_file_name': test_utils._PUBKEY_FILE_NAME,
           'metadata_size': metadata_size,
           'part_sizes': {
@@ -1085,15 +1050,14 @@
                      fail_rootfs_part_size_exceeded or
                      fail_kernel_part_size_exceeded)
       if should_fail:
-        self.assertRaises(PayloadError, payload_checker.Run, **kwargs)
+        self.assertRaises(PayloadError, payload_checker.Run, **kwargs2)
       else:
-        self.assertIsNone(payload_checker.Run(**kwargs))
+        self.assertIsNone(payload_checker.Run(**kwargs2))
 
 # This implements a generic API, hence the occasional unused args.
 # pylint: disable=W0613
-def ValidateCheckOperationTest(op_type_name, is_last, allow_signature,
-                               allow_unhashed, fail_src_extents,
-                               fail_dst_extents,
+def ValidateCheckOperationTest(op_type_name, allow_unhashed,
+                               fail_src_extents, fail_dst_extents,
                                fail_mismatched_data_offset_length,
                                fail_missing_dst_extents, fail_src_length,
                                fail_dst_length, fail_data_hash,
@@ -1147,7 +1111,7 @@
     run_method_name = 'Do%sTest' % tested_method_name
     test_method_name = 'test%s' % tested_method_name
     for arg_key, arg_val in run_dargs.iteritems():
-      if arg_val or type(arg_val) is int:
+      if arg_val or isinstance(arg_val, int):
         test_method_name += '__%s=%s' % (arg_key, arg_val)
     setattr(PayloadCheckerTest, test_method_name,
             TestMethodBody(run_method_name, run_dargs))
@@ -1196,8 +1160,6 @@
                      {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ',
                                        'SOURCE_COPY', 'SOURCE_BSDIFF',
                                        'PUFFDIFF', 'BROTLI_BSDIFF'),
-                      'is_last': (True, False),
-                      'allow_signature': (True, False),
                       'allow_unhashed': (True, False),
                       'fail_src_extents': (True, False),
                       'fail_dst_extents': (True, False),
@@ -1217,8 +1179,6 @@
   # Add all _CheckOperations() test cases.
   AddParametricTests('CheckSignatures',
                      {'fail_empty_sigs_blob': (True, False),
-                      'fail_missing_pseudo_op': (True, False),
-                      'fail_mismatched_pseudo_op': (True, False),
                       'fail_sig_missing_fields': (True, False),
                       'fail_unknown_sig_version': (True, False),
                       'fail_incorrect_sig': (True, False)})
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index b7b53dc..dfb8181 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -25,15 +25,12 @@
 #
 # Constants.
 #
-PSEUDO_EXTENT_MARKER = (1L << 64) - 1  # UINT64_MAX
-
 SIG_ASN1_HEADER = (
     '\x30\x31\x30\x0d\x06\x09\x60\x86'
     '\x48\x01\x65\x03\x04\x02\x01\x05'
     '\x00\x04\x20'
 )
 
-CHROMEOS_MAJOR_PAYLOAD_VERSION = 1
 BRILLO_MAJOR_PAYLOAD_VERSION = 2
 
 SOURCE_MINOR_PAYLOAD_VERSION = 2
@@ -162,8 +159,7 @@
   end_block = ex.start_block + ex.num_blocks
   if block_size:
     return '%d->%d * %d' % (ex.start_block, end_block, block_size)
-  else:
-    return '%d->%d' % (ex.start_block, end_block)
+  return '%d->%d' % (ex.start_block, end_block)
 
 
 def FormatSha256(digest):
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index 2a0cb58..1ed5f99 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -263,9 +263,7 @@
   def IsDelta(self):
     """Returns True iff the payload appears to be a delta."""
     self._AssertInit()
-    return (self.manifest.HasField('old_kernel_info') or
-            self.manifest.HasField('old_rootfs_info') or
-            any(partition.HasField('old_partition_info')
+    return (any(partition.HasField('old_partition_info')
                 for partition in self.manifest.partitions))
 
   def IsFull(self):
diff --git a/scripts/update_payload/test_utils.py b/scripts/update_payload/test_utils.py
index f0edad5..4f5fed0 100644
--- a/scripts/update_payload/test_utils.py
+++ b/scripts/update_payload/test_utils.py
@@ -173,31 +173,37 @@
     self.block_size = block_size
     _SetMsgField(self.manifest, 'block_size', block_size)
 
-  def SetPartInfo(self, is_kernel, is_new, part_size, part_hash):
+  def SetPartInfo(self, part_name, is_new, part_size, part_hash):
     """Set the partition info entry.
 
     Args:
-      is_kernel: whether this is kernel partition info
-      is_new: whether to set old (False) or new (True) info
-      part_size: the partition size (in fact, filesystem size)
-      part_hash: the partition hash
+      part_name: The name of the partition.
+      is_new: Whether to set old (False) or new (True) info.
+      part_size: The partition size (in fact, filesystem size).
+      part_hash: The partition hash.
     """
-    if is_kernel:
-      part_info = (self.manifest.new_kernel_info if is_new
-                   else self.manifest.old_kernel_info)
-    else:
-      part_info = (self.manifest.new_rootfs_info if is_new
-                   else self.manifest.old_rootfs_info)
+    partition = next((x for x in self.manifest.partitions
+                      if x.partition_name == part_name), None)
+    if partition is None:
+      partition = self.manifest.partitions.add()
+      partition.partition_name = part_name
+
+    part_info = (partition.new_partition_info if is_new
+                 else partition.old_partition_info)
     _SetMsgField(part_info, 'size', part_size)
     _SetMsgField(part_info, 'hash', part_hash)
 
-  def AddOperation(self, is_kernel, op_type, data_offset=None,
+  def AddOperation(self, part_name, op_type, data_offset=None,
                    data_length=None, src_extents=None, src_length=None,
                    dst_extents=None, dst_length=None, data_sha256_hash=None):
     """Adds an InstallOperation entry."""
-    operations = (self.manifest.kernel_install_operations if is_kernel
-                  else self.manifest.install_operations)
+    partition = next((x for x in self.manifest.partitions
+                      if x.partition_name == part_name), None)
+    if partition is None:
+      partition = self.manifest.partitions.add()
+      partition.partition_name = part_name
 
+    operations = partition.operations
     op = operations.add()
     op.type = op_type
 
@@ -277,7 +283,7 @@
     self.data_blobs.append(data_blob)
     return data_length, data_offset
 
-  def AddOperationWithData(self, is_kernel, op_type, src_extents=None,
+  def AddOperationWithData(self, part_name, op_type, src_extents=None,
                            src_length=None, dst_extents=None, dst_length=None,
                            data_blob=None, do_hash_data_blob=True):
     """Adds an install operation and associated data blob.
@@ -287,7 +293,7 @@
     necessary offset/length accounting.
 
     Args:
-      is_kernel: whether this is a kernel (True) or rootfs (False) operation
+      part_name: The name of the partition (e.g. kernel or root).
       op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ.
       src_extents: list of (start, length) pairs indicating src block ranges
       src_length: size of the src data in bytes (needed for diff operations)
@@ -302,15 +308,13 @@
         data_sha256_hash = hashlib.sha256(data_blob).digest()
       data_length, data_offset = self.AddData(data_blob)
 
-    self.AddOperation(is_kernel, op_type, data_offset=data_offset,
+    self.AddOperation(part_name, op_type, data_offset=data_offset,
                       data_length=data_length, src_extents=src_extents,
                       src_length=src_length, dst_extents=dst_extents,
                       dst_length=dst_length, data_sha256_hash=data_sha256_hash)
 
   def WriteToFileWithData(self, file_obj, sigs_data=None,
-                          privkey_file_name=None,
-                          do_add_pseudo_operation=False,
-                          is_pseudo_in_kernel=False, padding=None):
+                          privkey_file_name=None, padding=None):
     """Writes the payload content to a file, optionally signing the content.
 
     Args:
@@ -319,10 +323,6 @@
                  payload signature fields assumed to be preset by the caller)
       privkey_file_name: key used for signing the payload (optional; used only
                          if explicit signatures blob not provided)
-      do_add_pseudo_operation: whether a pseudo-operation should be added to
-                               account for the signature blob
-      is_pseudo_in_kernel: whether the pseudo-operation should be added to
-                           kernel (True) or rootfs (False) operations
       padding: stuff to dump past the normal data blobs provided (optional)
 
     Raises:
@@ -343,17 +343,6 @@
       # Update the payload with proper signature attributes.
       self.SetSignatures(self.curr_offset, sigs_len)
 
-    # Add a pseudo-operation to account for the signature blob, if requested.
-    if do_add_pseudo_operation:
-      if not self.block_size:
-        raise TestError('cannot add pseudo-operation without knowing the '
-                        'payload block size')
-      self.AddOperation(
-          is_pseudo_in_kernel, common.OpType.REPLACE,
-          data_offset=self.curr_offset, data_length=sigs_len,
-          dst_extents=[(common.PSEUDO_EXTENT_MARKER,
-                        (sigs_len + self.block_size - 1) / self.block_size)])
-
     if do_generate_sigs_data:
       # Once all payload fields are updated, dump and sign it.
       temp_payload_file = cStringIO.StringIO()
diff --git a/scripts/update_payload/update_metadata_pb2.py b/scripts/update_payload/update_metadata_pb2.py
index 6275642..907cc18 100644
--- a/scripts/update_payload/update_metadata_pb2.py
+++ b/scripts/update_payload/update_metadata_pb2.py
@@ -20,7 +20,7 @@
   package='chromeos_update_engine',
   syntax='proto2',
   serialized_options=_b('H\003'),
-  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xd0\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\x8f\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 
\x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xb1\x06\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c 
\x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
+  serialized_pb=_b('\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xee\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\x04\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\x04\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t \x01(\x0c\"\xad\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x0c\n\x04MOVE\x10\x02\x1a\x02\x08\x01\x12\x0e\n\x06\x42SDIFF\x10\x03\x1a\x02\x08\x01\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 
\x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xc9\x06\n\x14\x44\x65ltaArchiveManifest\x12H\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12O\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperationB\x02\x18\x01\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12\x42\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x42\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfoB\x02\x18\x01\x12\x39\n\x0eold_image_info\x18\n 
\x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x12T\n\x1a\x64ynamic_partition_metadata\x18\x0f \x01(\x0b\x32\x30.chromeos_update_engine.DynamicPartitionMetadataB\x02H\x03')
 )
 
 
@@ -40,38 +40,46 @@
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='SOURCE_COPY', index=2, number=4,
+      name='MOVE', index=2, number=2,
+      serialized_options=_b('\010\001'),
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='BSDIFF', index=3, number=3,
+      serialized_options=_b('\010\001'),
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='SOURCE_COPY', index=4, number=4,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='SOURCE_BSDIFF', index=3, number=5,
+      name='SOURCE_BSDIFF', index=5, number=5,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='REPLACE_XZ', index=4, number=8,
+      name='REPLACE_XZ', index=6, number=8,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='ZERO', index=5, number=6,
+      name='ZERO', index=7, number=6,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='DISCARD', index=6, number=7,
+      name='DISCARD', index=8, number=7,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='BROTLI_BSDIFF', index=7, number=10,
+      name='BROTLI_BSDIFF', index=9, number=10,
       serialized_options=None,
       type=None),
     _descriptor.EnumValueDescriptor(
-      name='PUFFDIFF', index=8, number=9,
+      name='PUFFDIFF', index=10, number=9,
       serialized_options=None,
       type=None),
   ],
   containing_type=None,
   serialized_options=None,
   serialized_start=712,
-  serialized_end=855,
+  serialized_end=885,
 )
 _sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE)
 
@@ -370,7 +378,7 @@
   oneofs=[
   ],
   serialized_start=391,
-  serialized_end=855,
+  serialized_end=885,
 )
 
 
@@ -505,8 +513,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=858,
-  serialized_end=1585,
+  serialized_start=888,
+  serialized_end=1615,
 )
 
 
@@ -550,8 +558,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1587,
-  serialized_end=1663,
+  serialized_start=1617,
+  serialized_end=1693,
 )
 
 
@@ -581,8 +589,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1665,
-  serialized_end=1754,
+  serialized_start=1695,
+  serialized_end=1784,
 )
 
 
@@ -599,14 +607,14 @@
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1,
       number=2, type=11, cpp_type=10, label=3,
       has_default_value=False, default_value=[],
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2,
       number=3, type=13, cpp_type=3, label=1,
@@ -634,28 +642,28 @@
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6,
       number=7, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7,
       number=8, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8,
       number=9, type=11, cpp_type=10, label=1,
       has_default_value=False, default_value=None,
       message_type=None, enum_type=None, containing_type=None,
       is_extension=False, extension_scope=None,
-      serialized_options=None, file=DESCRIPTOR),
+      serialized_options=_b('\030\001'), file=DESCRIPTOR),
     _descriptor.FieldDescriptor(
       name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9,
       number=10, type=11, cpp_type=10, label=1,
@@ -710,8 +718,8 @@
   extension_ranges=[],
   oneofs=[
   ],
-  serialized_start=1757,
-  serialized_end=2574,
+  serialized_start=1787,
+  serialized_end=2628,
 )
 
 _SIGNATURES_SIGNATURE.containing_type = _SIGNATURES
@@ -823,4 +831,12 @@
 
 
 DESCRIPTOR._options = None
+_INSTALLOPERATION_TYPE.values_by_name["MOVE"]._options = None
+_INSTALLOPERATION_TYPE.values_by_name["BSDIFF"]._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['install_operations']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info']._options = None
+_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info']._options = None
 # @@protoc_insertion_point(module_scope)