paycheck: enforce physical partition size correctly

During payload checking, payload has wrongly interpreted the size
reported in the update payload to be the physical partition size,
whereas this is in fact the size of the filesystem portion only (a
misnomer). This sometimes caused it to emit errors on out-of-bounds
operations, which are otherwise harmless in real-world scenarios.

This CL makes a clear distinction between the two, with the following
semantics:

- The payload's embedded filesystem size must be <= the physical
  partition sizes.

- Reading/writing from/to the new partition must be within the physical
  partition size boundaries, and not the filesystem ones.

- Reading from the old partition is only allowed from filesystem
  boundaries; this is unchanged from current behavior and appears to be
  consistent with how we perform delta updates.

- Old/new SHA256 verification during payload application is now limited
  to the allotted filesystem portion only (and not the full partition
  size). This is consistent with the update engine's semantics.

- Other than that, this change currently has no further effect on
  payload application, which remains more permissive with respect to
  partition sizes.  This also means that the sizes of partitions resulting from
  a payload application will not necessarily abide by the predetermined
  physical partition sizes.  This is in line with the prevailing
  division of responsibilities between payload checking (strict) and
  application (relaxed).

BUG=chromium:221847
TEST=Payload checking respects partition size override
TEST=Unit tests pass
TEST=Integration tests pass

Change-Id: I0dbc88d538c0cc53b7551f4dfa8f543bcf480cd5
Reviewed-on: https://gerrit.chromium.org/gerrit/50103
Reviewed-by: Gilad Arnold <[email protected]>
Tested-by: Gilad Arnold <[email protected]>
Commit-Queue: David James <[email protected]>
diff --git a/scripts/paycheck.py b/scripts/paycheck.py
index eb88ec9..108c000 100755
--- a/scripts/paycheck.py
+++ b/scripts/paycheck.py
@@ -17,6 +17,10 @@
 import update_payload
 
 
+# The default sizes of partitions, based on current partitioning practice.
+_DEFAULT_ROOTFS_PART_SIZE = 2 * 1024 * 1024 * 1024
+_DEFAULT_KERNEL_PART_SIZE = 16 * 1024 * 1024
+
 _TYPE_FULL = 'full'
 _TYPE_DELTA = 'delta'
 
@@ -76,6 +80,15 @@
                         default=default_key)
   check_opts.add_option('-m', '--meta-sig', metavar='FILE',
                         help='verify metadata against its signature')
+  check_opts.add_option('-p', '--root-part-size', metavar='NUM',
+                        default=_DEFAULT_ROOTFS_PART_SIZE, type='int',
+                        help=('override default (%default) rootfs partition '
+                              'size'))
+  check_opts.add_option('-P', '--kern-part-size', metavar='NUM',
+                        default=_DEFAULT_KERNEL_PART_SIZE, type='int',
+                        help=('override default (%default) kernel partition '
+                              'size'))
+
   parser.add_option_group(check_opts)
 
   trace_opts = optparse.OptionGroup(parser, 'Block tracing')
@@ -109,7 +122,9 @@
   # There are several options that imply --check.
   opts.check = (opts.check or opts.report or opts.assert_type or
                 opts.block_size or opts.allow_unhashed or
-                opts.disabled_tests or opts.key or opts.meta_sig)
+                opts.disabled_tests or opts.key or opts.meta_sig or
+                opts.root_part_size != _DEFAULT_ROOTFS_PART_SIZE or
+                opts.kern_part_size != _DEFAULT_KERNEL_PART_SIZE)
 
   # Check number of arguments, enforce payload type accordingly.
   if len(args) == 3:
@@ -171,6 +186,8 @@
               report_out_file=report_file,
               assert_type=options.assert_type,
               block_size=int(options.block_size),
+              rootfs_part_size=options.root_part_size,
+              kernel_part_size=options.kern_part_size,
               allow_unhashed=options.allow_unhashed,
               disabled_tests=options.disabled_tests)
         finally:
diff --git a/scripts/update_payload/__init__.py b/scripts/update_payload/__init__.py
index eae23af..1906a16 100644
--- a/scripts/update_payload/__init__.py
+++ b/scripts/update_payload/__init__.py
@@ -6,6 +6,6 @@
 
 # Just raise the interface classes to the root namespace.
 # pylint: disable=W0401
+from checker import CHECKS_TO_DISABLE
 from error import PayloadError
 from payload import Payload
-from checker import CHECKS_TO_DISABLE
diff --git a/scripts/update_payload/applier.py b/scripts/update_payload/applier.py
index 6780e9a..3b7b1a6 100644
--- a/scripts/update_payload/applier.py
+++ b/scripts/update_payload/applier.py
@@ -28,25 +28,25 @@
 #
 # Helper functions.
 #
-def _VerifySha256(file_obj, expected_hash, name, max_length=-1):
+def _VerifySha256(file_obj, expected_hash, name, length=-1):
   """Verifies the SHA256 hash of a file.
 
   Args:
     file_obj: file object to read
     expected_hash: the hash digest we expect to be getting
     name: name string of this hash, for error reporting
-    max_length: maximum length of data to read/hash (optional)
+    length: precise length of data to verify (optional)
   Raises:
-    PayloadError if file hash fails to verify.
+    PayloadError if computed hash doesn't match expected one, or if fails to
+    read the specified length of data.
 
   """
   # pylint: disable=E1101
   hasher = hashlib.sha256()
   block_length = 1024 * 1024
-  if max_length < 0:
-    max_length = sys.maxint
+  max_length = length if length >= 0 else sys.maxint
 
-  while max_length != 0:
+  while max_length > 0:
     read_length = min(max_length, block_length)
     data = file_obj.read(read_length)
     if not data:
@@ -54,6 +54,11 @@
     max_length -= len(data)
     hasher.update(data)
 
+  if length >= 0 and max_length > 0:
+    raise PayloadError(
+        'insufficient data (%d instead of %d) when verifying %s' %
+        (length - max_length, length, name))
+
   actual_hash = hasher.digest()
   if actual_hash != expected_hash:
     raise PayloadError('%s hash (%s) not as expected (%s)' %
@@ -319,7 +324,8 @@
     if src_part_file_name:
       # Verify the source partition.
       with open(src_part_file_name, 'rb') as src_part_file:
-        _VerifySha256(src_part_file, src_part_info.hash, part_name)
+        _VerifySha256(src_part_file, src_part_info.hash, part_name,
+                      length=src_part_info.size)
 
       # Copy the src partition to the dst one.
       shutil.copyfile(src_part_file_name, dst_part_file_name)
@@ -335,7 +341,8 @@
 
     # Verify the resulting partition.
     with open(dst_part_file_name, 'rb') as dst_part_file:
-      _VerifySha256(dst_part_file, dst_part_info.hash, part_name)
+      _VerifySha256(dst_part_file, dst_part_info.hash, part_name,
+                    length=dst_part_info.size)
 
   def Run(self, dst_kernel_part, dst_rootfs_part, src_kernel_part=None,
           src_rootfs_part=None):
diff --git a/scripts/update_payload/checker.py b/scripts/update_payload/checker.py
index 703b166..e1b08a1 100644
--- a/scripts/update_payload/checker.py
+++ b/scripts/update_payload/checker.py
@@ -32,9 +32,9 @@
 _CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block'
 _CHECK_PAYLOAD_SIG = 'payload-sig'
 CHECKS_TO_DISABLE = (
-  _CHECK_DST_PSEUDO_EXTENTS,
-  _CHECK_MOVE_SAME_SRC_DST_BLOCK,
-  _CHECK_PAYLOAD_SIG,
+    _CHECK_DST_PSEUDO_EXTENTS,
+    _CHECK_MOVE_SAME_SRC_DST_BLOCK,
+    _CHECK_PAYLOAD_SIG,
 )
 
 _TYPE_FULL = 'full'
@@ -296,10 +296,10 @@
     # Reset state; these will be assigned when the manifest is checked.
     self.sigs_offset = 0
     self.sigs_size = 0
-    self.old_rootfs_size = 0
-    self.old_kernel_size = 0
-    self.new_rootfs_size = 0
-    self.new_kernel_size = 0
+    self.old_rootfs_fs_size = 0
+    self.old_kernel_fs_size = 0
+    self.new_rootfs_fs_size = 0
+    self.new_kernel_fs_size = 0
 
   @staticmethod
   def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str,
@@ -459,22 +459,24 @@
 
     """
     # Check: length <= num_blocks * block_size.
-    if not length <= num_blocks * block_size:
+    if length > num_blocks * block_size:
       raise PayloadError(
           '%s (%d) > num %sblocks (%d) * block_size (%d)' %
           (length_name, length, block_name or '', num_blocks, block_size))
 
     # Check: length > (num_blocks - 1) * block_size.
-    if not length > (num_blocks - 1) * block_size:
+    if length <= (num_blocks - 1) * block_size:
       raise PayloadError(
           '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d)' %
           (length_name, length, block_name or '', num_blocks - 1, block_size))
 
-  def _CheckManifest(self, report):
+  def _CheckManifest(self, report, rootfs_part_size=0, kernel_part_size=0):
     """Checks the payload manifest.
 
     Args:
       report: a report object to add to
+      rootfs_part_size: size of the rootfs partition in bytes
+      kernel_part_size: size of the kernel partition in bytes
     Returns:
       A tuple consisting of the partition block size used during the update
       (integer), the signatures block offset and size.
@@ -515,14 +517,24 @@
       self.payload_type = _TYPE_DELTA
 
       # Check: {size, hash} present in old_{kernel,rootfs}_info.
-      self.old_kernel_size = self._CheckMandatoryField(
+      self.old_kernel_fs_size = self._CheckMandatoryField(
           oki_msg, 'size', oki_report, 'old_kernel_info')
       self._CheckMandatoryField(oki_msg, 'hash', oki_report, 'old_kernel_info',
                                 convert=common.FormatSha256)
-      self.old_rootfs_size = self._CheckMandatoryField(
+      self.old_rootfs_fs_size = self._CheckMandatoryField(
           ori_msg, 'size', ori_report, 'old_rootfs_info')
       self._CheckMandatoryField(ori_msg, 'hash', ori_report, 'old_rootfs_info',
                                 convert=common.FormatSha256)
+
+      # Check: old_{kernel,rootfs} size must fit in respective partition.
+      if kernel_part_size and self.old_kernel_fs_size > kernel_part_size:
+        raise PayloadError(
+            'old kernel content (%d) exceed partition size (%d)' %
+            (self.old_kernel_fs_size, kernel_part_size))
+      if rootfs_part_size and self.old_rootfs_fs_size > rootfs_part_size:
+        raise PayloadError(
+            'old rootfs content (%d) exceed partition size (%d)' %
+            (self.old_rootfs_fs_size, rootfs_part_size))
     else:
       # Assert/mark full payload.
       if self.payload_type == _TYPE_DELTA:
@@ -533,7 +545,7 @@
     # Check: new_kernel_info present; contains {size, hash}.
     nki_msg, nki_report = self._CheckMandatorySubMsg(
         manifest, 'new_kernel_info', report, 'manifest')
-    self.new_kernel_size = self._CheckMandatoryField(
+    self.new_kernel_fs_size = self._CheckMandatoryField(
         nki_msg, 'size', nki_report, 'new_kernel_info')
     self._CheckMandatoryField(nki_msg, 'hash', nki_report, 'new_kernel_info',
                               convert=common.FormatSha256)
@@ -541,11 +553,21 @@
     # Check: new_rootfs_info present; contains {size, hash}.
     nri_msg, nri_report = self._CheckMandatorySubMsg(
         manifest, 'new_rootfs_info', report, 'manifest')
-    self.new_rootfs_size = self._CheckMandatoryField(
+    self.new_rootfs_fs_size = self._CheckMandatoryField(
         nri_msg, 'size', nri_report, 'new_rootfs_info')
     self._CheckMandatoryField(nri_msg, 'hash', nri_report, 'new_rootfs_info',
                               convert=common.FormatSha256)
 
+    # Check: new_{kernel,rootfs} size must fit in respective partition.
+    if kernel_part_size and self.new_kernel_fs_size > kernel_part_size:
+      raise PayloadError(
+          'new kernel content (%d) exceed partition size (%d)' %
+          (self.new_kernel_fs_size, kernel_part_size))
+    if rootfs_part_size and self.new_rootfs_fs_size > rootfs_part_size:
+      raise PayloadError(
+          'new rootfs content (%d) exceed partition size (%d)' %
+          (self.new_rootfs_fs_size, rootfs_part_size))
+
     # Check: payload must contain at least one operation.
     if not(len(manifest.install_operations) or
            len(manifest.kernel_install_operations)):
@@ -571,13 +593,13 @@
     self._CheckBlocksFitLength(length, total_blocks, self.block_size,
                                '%s: %s' % (op_name, length_name))
 
-  def _CheckExtents(self, extents, part_size, block_counters, name,
+  def _CheckExtents(self, extents, usable_size, block_counters, name,
                     allow_pseudo=False, allow_signature=False):
     """Checks a sequence of extents.
 
     Args:
       extents: the sequence of extents to check
-      part_size: the total size of the partition to which the extents apply
+      usable_size: the usable size of the partition to which the extents apply
       block_counters: an array of counters corresponding to the number of blocks
       name: the name of the extent block
       allow_pseudo: whether or not pseudo block numbers are allowed
@@ -603,10 +625,10 @@
 
       if start_block != common.PSEUDO_EXTENT_MARKER:
         # Check: make sure we're within the partition limit.
-        if part_size and end_block * self.block_size > part_size:
+        if usable_size and end_block * self.block_size > usable_size:
           raise PayloadError(
-              '%s: extent (%s) exceeds partition size (%d)' %
-              (ex_name, common.FormatExtent(ex, self.block_size), part_size))
+              '%s: extent (%s) exceeds usable partition size (%d)' %
+              (ex_name, common.FormatExtent(ex, self.block_size), usable_size))
 
         # Record block usage.
         for i in range(start_block, end_block):
@@ -750,7 +772,7 @@
            total_dst_blocks * self.block_size))
 
   def _CheckOperation(self, op, op_name, is_last, old_block_counters,
-                      new_block_counters, old_part_size, new_part_size,
+                      new_block_counters, old_fs_size, new_usable_size,
                       prev_data_offset, allow_signature, blob_hash_counts):
     """Checks a single update operation.
 
@@ -760,8 +782,8 @@
       is_last: whether this is the last operation in the sequence
       old_block_counters: arrays of block read counters
       new_block_counters: arrays of block write counters
-      old_part_size: the source partition size in bytes
-      new_part_size: the target partition size in bytes
+      old_fs_size: the old filesystem size in bytes
+      new_usable_size: the overall usable size of the new partition in bytes
       prev_data_offset: offset of last used data bytes
       allow_signature: whether this may be a signature operation
       blob_hash_counts: counters for hashed/unhashed blobs
@@ -773,12 +795,12 @@
     """
     # Check extents.
     total_src_blocks = self._CheckExtents(
-        op.src_extents, old_part_size, old_block_counters,
+        op.src_extents, old_fs_size, old_block_counters,
         op_name + '.src_extents', allow_pseudo=True)
     allow_signature_in_extents = (allow_signature and is_last and
                                   op.type == common.OpType.REPLACE)
     total_dst_blocks = self._CheckExtents(
-        op.dst_extents, new_part_size, new_block_counters,
+        op.dst_extents, new_usable_size, new_block_counters,
         op_name + '.dst_extents',
         allow_pseudo=(not self.check_dst_pseudo_extents),
         allow_signature=allow_signature_in_extents)
@@ -848,29 +870,34 @@
 
     return data_length if data_length is not None else 0
 
-  def _AllocBlockCounters(self, part_size):
+  def _SizeToNumBlocks(self, size):
+    """Returns the number of blocks needed to contain a given byte size."""
+    return (size + self.block_size - 1) / self.block_size
+
+  def _AllocBlockCounters(self, total_size):
     """Returns a freshly initialized array of block counters.
 
     Args:
-      part_size: the size of the partition
+      total_size: the total block size in bytes
     Returns:
       An array of unsigned char elements initialized to zero, one for each of
       the blocks necessary for containing the partition.
 
     """
-    num_blocks = (part_size + self.block_size - 1) / self.block_size
-    return array.array('B', [0] * num_blocks)
+    return array.array('B', [0] * self._SizeToNumBlocks(total_size))
 
-  def _CheckOperations(self, operations, report, base_name, old_part_size,
-                       new_part_size, prev_data_offset, allow_signature):
+  def _CheckOperations(self, operations, report, base_name, old_fs_size,
+                       new_fs_size, new_usable_size, prev_data_offset,
+                       allow_signature):
     """Checks a sequence of update operations.
 
     Args:
       operations: the sequence of operations to check
       report: the report object to add to
       base_name: the name of the operation block
-      old_part_size: the old partition size in bytes
-      new_part_size: the new partition size in bytes
+      old_fs_size: the old filesystem size in bytes
+      new_fs_size: the new filesystem size in bytes
+      new_usable_size: the overall usable size of the new partition in bytes
       prev_data_offset: offset of last used data bytes
       allow_signature: whether this sequence may contain signature operations
     Returns:
@@ -904,9 +931,9 @@
       blob_hash_counts['signature'] = 0
 
     # Allocate old and new block counters.
-    old_block_counters = (self._AllocBlockCounters(old_part_size)
-                          if old_part_size else None)
-    new_block_counters = self._AllocBlockCounters(new_part_size)
+    old_block_counters = (self._AllocBlockCounters(old_fs_size)
+                          if old_fs_size else None)
+    new_block_counters = self._AllocBlockCounters(new_usable_size)
 
     # Process and verify each operation.
     op_num = 0
@@ -921,7 +948,7 @@
       is_last = op_num == len(operations)
       curr_data_used = self._CheckOperation(
           op, op_name, is_last, old_block_counters, new_block_counters,
-          old_part_size, new_part_size, prev_data_offset + total_data_used,
+          old_fs_size, new_usable_size, prev_data_offset + total_data_used,
           allow_signature, blob_hash_counts)
       if curr_data_used:
         op_blob_totals[op.type] += curr_data_used
@@ -952,16 +979,17 @@
                       histogram.Histogram.FromKeyList(old_block_counters),
                       linebreak=True, indent=1)
 
-    new_write_hist = histogram.Histogram.FromKeyList(new_block_counters)
+    new_write_hist = histogram.Histogram.FromKeyList(
+        new_block_counters[:self._SizeToNumBlocks(new_fs_size)])
+    report.AddField('block write hist', new_write_hist, linebreak=True,
+                    indent=1)
+
     # Check: full update must write each dst block once.
     if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]:
       raise PayloadError(
           '%s: not all blocks written exactly once during full update' %
           base_name)
 
-    report.AddField('block write hist', new_write_hist, linebreak=True,
-                    indent=1)
-
     return total_data_used
 
   def _CheckSignatures(self, report, pubkey_file_name):
@@ -1014,12 +1042,16 @@
         raise PayloadError('unknown signature version (%d)' % sig.version)
 
   def Run(self, pubkey_file_name=None, metadata_sig_file=None,
-          report_out_file=None):
+          rootfs_part_size=0, kernel_part_size=0, report_out_file=None):
     """Checker entry point, invoking all checks.
 
     Args:
       pubkey_file_name: public key used for signature verification
       metadata_sig_file: metadata signature, if verification is desired
+      rootfs_part_size: the size of rootfs partitions in bytes (default: use
+                        reported filesystem size)
+      kernel_part_size: the size of kernel partitions in bytes (default: use
+                        reported filesystem size)
       report_out_file: file object to dump the report to
     Raises:
       PayloadError if payload verification failed.
@@ -1053,22 +1085,26 @@
       report.AddField('manifest len', self.payload.header.manifest_len)
 
       # Part 2: check the manifest.
-      self._CheckManifest(report)
+      self._CheckManifest(report, rootfs_part_size, kernel_part_size)
       assert self.payload_type, 'payload type should be known by now'
 
       # Part 3: examine rootfs operations.
       report.AddSection('rootfs operations')
       total_blob_size = self._CheckOperations(
           self.payload.manifest.install_operations, report,
-          'install_operations', self.old_rootfs_size, self.new_rootfs_size, 0,
-          False)
+          'install_operations', self.old_rootfs_fs_size,
+          self.new_rootfs_fs_size,
+          rootfs_part_size if rootfs_part_size else self.new_rootfs_fs_size,
+          0, False)
 
       # Part 4: examine kernel operations.
       report.AddSection('kernel operations')
       total_blob_size += self._CheckOperations(
           self.payload.manifest.kernel_install_operations, report,
-          'kernel_install_operations', self.old_kernel_size,
-          self.new_kernel_size, total_blob_size, True)
+          'kernel_install_operations', self.old_kernel_fs_size,
+          self.new_kernel_fs_size,
+          kernel_part_size if kernel_part_size else self.new_kernel_fs_size,
+          total_blob_size, True)
 
       # Check: operations data reach the end of the payload file.
       used_payload_size = self.payload.data_offset + total_blob_size
diff --git a/scripts/update_payload/checker_unittest.py b/scripts/update_payload/checker_unittest.py
index 8d134fc..c7a291b 100755
--- a/scripts/update_payload/checker_unittest.py
+++ b/scripts/update_payload/checker_unittest.py
@@ -414,7 +414,9 @@
 
   def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs,
                           fail_mismatched_oki_ori, fail_bad_oki, fail_bad_ori,
-                          fail_bad_nki, fail_bad_nri, fail_missing_ops):
+                          fail_bad_nki, fail_bad_nri, fail_missing_ops,
+                          fail_old_kernel_fs_size, fail_old_rootfs_fs_size,
+                          fail_new_kernel_fs_size, fail_new_rootfs_fs_size):
     """Parametric testing of _CheckManifest().
 
     Args:
@@ -426,6 +428,10 @@
       fail_bad_nki: tamper with new kernel info
       fail_bad_nri: tamper with new rootfs info
       fail_missing_ops: simulate a manifest without any operations
+      fail_old_kernel_fs_size: make old kernel fs size too big
+      fail_old_rootfs_fs_size: make old rootfs fs size too big
+      fail_new_kernel_fs_size: make new kernel fs size too big
+      fail_new_rootfs_fs_size: make new rootfs fs size too big
 
     """
     # Generate a test payload. For this test, we only care about the manifest
@@ -452,20 +458,37 @@
     if fail_bad_sigs:
       payload_gen.SetSignatures(32, None)
 
+    # Set partition / filesystem sizes.
+    rootfs_part_size = _MiB(8)
+    kernel_part_size = _KiB(512)
+    old_rootfs_fs_size = new_rootfs_fs_size = rootfs_part_size
+    old_kernel_fs_size = new_kernel_fs_size = kernel_part_size
+    if fail_old_kernel_fs_size:
+      old_kernel_fs_size += 100
+    if fail_old_rootfs_fs_size:
+      old_rootfs_fs_size += 100
+    if fail_new_kernel_fs_size:
+      new_kernel_fs_size += 100
+    if fail_new_rootfs_fs_size:
+      new_rootfs_fs_size += 100
+
     # Add old kernel/rootfs partition info, as required.
-    if fail_mismatched_oki_ori or fail_bad_oki:
+    if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:
       oki_hash = (None if fail_bad_oki
                   else hashlib.sha256('fake-oki-content').digest())
-      payload_gen.SetPartInfo(True, False, _KiB(512), oki_hash)
-    if not fail_mismatched_oki_ori and fail_bad_ori:
-      payload_gen.SetPartInfo(False, False, _MiB(8), None)
+      payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)
+    if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or
+                                        fail_bad_ori):
+      ori_hash = (None if fail_bad_ori
+                  else hashlib.sha256('fake-ori-content').digest())
+      payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)
 
     # Add new kernel/rootfs partition info.
     payload_gen.SetPartInfo(
-        True, True, _KiB(512),
+        True, True, new_kernel_fs_size,
         None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())
     payload_gen.SetPartInfo(
-        False, True, _MiB(8),
+        False, True, new_rootfs_fs_size,
         None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())
 
     # Create the test object.
@@ -474,12 +497,17 @@
 
     should_fail = (fail_mismatched_block_size or fail_bad_sigs or
                    fail_mismatched_oki_ori or fail_bad_oki or fail_bad_ori or
-                   fail_bad_nki or fail_bad_nri or fail_missing_ops)
+                   fail_bad_nki or fail_bad_nri or fail_missing_ops or
+                   fail_old_kernel_fs_size or fail_old_rootfs_fs_size or
+                   fail_new_kernel_fs_size or fail_new_rootfs_fs_size)
     if should_fail:
       self.assertRaises(update_payload.PayloadError,
-                        payload_checker._CheckManifest, report)
+                        payload_checker._CheckManifest, report,
+                        rootfs_part_size, kernel_part_size)
     else:
-      self.assertIsNone(payload_checker._CheckManifest(report))
+      self.assertIsNone(payload_checker._CheckManifest(report,
+                                                       rootfs_part_size,
+                                                       kernel_part_size))
 
   def testCheckLength(self):
     """Tests _CheckLength()."""
@@ -934,7 +962,7 @@
 
     should_fail = (fail_bad_type or fail_nonexhaustive_full_update)
     largs = (payload_checker.payload.manifest.install_operations, report,
-             'foo', 0, rootfs_part_size, 0, False)
+             'foo', 0, rootfs_part_size, rootfs_part_size, 0, False)
     if should_fail:
       self.assertRaises(update_payload.PayloadError,
                         payload_checker._CheckOperations, *largs)
@@ -952,9 +980,10 @@
     block_size = _KiB(4)
     payload_gen.SetBlockSize(block_size)
     rootfs_part_size = _MiB(2)
+    kernel_part_size = _KiB(16)
     payload_gen.SetPartInfo(False, True, rootfs_part_size,
                             hashlib.sha256('fake-new-rootfs-content').digest())
-    payload_gen.SetPartInfo(True, True, _KiB(16),
+    payload_gen.SetPartInfo(True, True, kernel_part_size,
                             hashlib.sha256('fake-new-kernel-content').digest())
     payload_gen.AddOperationWithData(
         False, common.OpType.REPLACE,
@@ -1000,7 +1029,7 @@
     report = checker._PayloadReport()
 
     # We have to check the manifest first in order to set signature attributes.
-    payload_checker._CheckManifest(report)
+    payload_checker._CheckManifest(report, rootfs_part_size, kernel_part_size)
 
     should_fail = (fail_empty_sigs_blob or fail_missing_pseudo_op or
                    fail_mismatched_pseudo_op or fail_sig_missing_fields or
@@ -1165,7 +1194,11 @@
                       'fail_bad_ori': (True, False),
                       'fail_bad_nki': (True, False),
                       'fail_bad_nri': (True, False),
-                      'fail_missing_ops': (True, False)})
+                      'fail_missing_ops': (True, False),
+                      'fail_old_kernel_fs_size': (True, False),
+                      'fail_old_rootfs_fs_size': (True, False),
+                      'fail_new_kernel_fs_size': (True, False),
+                      'fail_new_rootfs_fs_size': (True, False)})
 
   # Add all _CheckOperation() test cases.
   AddParametricTests('CheckOperation',
diff --git a/scripts/update_payload/common.py b/scripts/update_payload/common.py
index 6b5dbad..a869730 100644
--- a/scripts/update_payload/common.py
+++ b/scripts/update_payload/common.py
@@ -46,7 +46,7 @@
 
 
 #
-# Checker and hashed reading of data.
+# Checked and hashed reading of data.
 #
 def IntPackingFmtStr(size, is_unsigned):
   """Returns an integer format string for use by the struct module.
diff --git a/scripts/update_payload/payload.py b/scripts/update_payload/payload.py
index b4760b2..e432092 100644
--- a/scripts/update_payload/payload.py
+++ b/scripts/update_payload/payload.py
@@ -175,7 +175,8 @@
 
   def Check(self, pubkey_file_name=None, metadata_sig_file=None,
             report_out_file=None, assert_type=None, block_size=0,
-            allow_unhashed=False, disabled_tests=()):
+            rootfs_part_size=0, kernel_part_size=0, allow_unhashed=False,
+            disabled_tests=()):
     """Checks the payload integrity.
 
     Args:
@@ -184,6 +185,8 @@
       report_out_file: file object to dump the report to
       assert_type: assert that payload is either 'full' or 'delta'
       block_size: expected filesystem / payload block size
+      rootfs_part_size: the size of (physical) rootfs partitions in bytes
+      kernel_part_size: the size of (physical) kernel partitions in bytes
       allow_unhashed: allow unhashed operation blobs
       disabled_tests: list of tests to disable
     Raises:
@@ -198,6 +201,8 @@
         allow_unhashed=allow_unhashed, disabled_tests=disabled_tests)
     helper.Run(pubkey_file_name=pubkey_file_name,
                metadata_sig_file=metadata_sig_file,
+               rootfs_part_size=rootfs_part_size,
+               kernel_part_size=kernel_part_size,
                report_out_file=report_out_file)
 
   def Apply(self, dst_kernel_part, dst_rootfs_part, src_kernel_part=None,