fork chromite modules here

Autotest is stuck on Python 2.  Fork the used modules in here to
untangle the dependency on chromite.  If Autotest ever migrates to
Python 3, it can move back to chromite.

The fork lives in utils/frozen_chromite/ and can be imported from there.

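Each call site is updated with the same mechanical pattern: swap the
chromite import for the frozen copy and keep the existing metrics_mock
fallback.  An illustrative sketch of the pattern (mirroring the change
to client/bin/result_tools/runner.py):

    from autotest_lib.client.common_lib import utils as client_utils

    try:
        # New import path after the fork
        # (previously: from chromite.lib import metrics).
        from autotest_lib.utils.frozen_chromite.lib import metrics
    except ImportError:
        # Fall back to the mock when the frozen copy is unavailable.
        metrics = client_utils.metrics_mock
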
BUG=chromium:1049711
TEST=CQ passes

Change-Id: I58d7ae3802750d6c53ec0a0f794cb5664364f211
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/2393472
Reviewed-by: Derek Beckett <[email protected]>
Commit-Queue: Mike Frysinger <[email protected]>
Tested-by: Mike Frysinger <[email protected]>
diff --git a/cli/server.py b/cli/server.py
index de3435f..0cfb92c 100644
--- a/cli/server.py
+++ b/cli/server.py
@@ -33,7 +33,7 @@
 from autotest_lib.frontend import setup_django_environment
 from autotest_lib.site_utils import server_manager
 from autotest_lib.site_utils import server_manager_utils
-from chromite.lib import gob_util
+from autotest_lib.utils.frozen_chromite.lib import gob_util
 
 try:
     from skylab_inventory import text_manager
diff --git a/cli/skylab_utils.py b/cli/skylab_utils.py
index 5fb7c6a..76d3951 100644
--- a/cli/skylab_utils.py
+++ b/cli/skylab_utils.py
@@ -10,7 +10,7 @@
 import common
 
 from autotest_lib.client.common_lib import revision_control
-from chromite.lib import gob_util
+from autotest_lib.utils.frozen_chromite.lib import gob_util
 
 try:
     from skylab_inventory import text_manager
diff --git a/client/bin/result_tools/runner.py b/client/bin/result_tools/runner.py
index d8802cb..75533f5 100644
--- a/client/bin/result_tools/runner.py
+++ b/client/bin/result_tools/runner.py
@@ -19,7 +19,7 @@
 from autotest_lib.client.common_lib import utils as client_utils
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = client_utils.metrics_mock
 
diff --git a/client/common_lib/cros/dev_server.py b/client/common_lib/cros/dev_server.py
index b2dbb46..d4f7553 100644
--- a/client/common_lib/cros/dev_server.py
+++ b/client/common_lib/cros/dev_server.py
@@ -31,7 +31,7 @@
 # TODO(cmasone): redo this class using requests module; http://crosbug.com/30107
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/client/common_lib/cros/vpd_utils.py b/client/common_lib/cros/vpd_utils.py
index cd7cb2d..7095260 100644
--- a/client/common_lib/cros/vpd_utils.py
+++ b/client/common_lib/cros/vpd_utils.py
@@ -3,7 +3,7 @@
 # found in the LICENSE file.
 
 from autotest_lib.client.common_lib import error
-from chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import retry_util
 
 
 _VPD_BASE_CMD = 'vpd -i %s %s %s'
diff --git a/client/common_lib/hosts/repair.py b/client/common_lib/hosts/repair.py
index 88d9c65..e785106 100644
--- a/client/common_lib/hosts/repair.py
+++ b/client/common_lib/hosts/repair.py
@@ -30,7 +30,7 @@
 from autotest_lib.client.common_lib import error
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     from autotest_lib.client.bin.utils import metrics_mock as metrics
 
diff --git a/client/common_lib/test.py b/client/common_lib/test.py
index 5898e62..c68adbc 100644
--- a/client/common_lib/test.py
+++ b/client/common_lib/test.py
@@ -44,7 +44,7 @@
 from autotest_lib.client.common_lib import utils as client_utils
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = client_utils.metrics_mock
 
diff --git a/server/autoserv b/server/autoserv
index 8eb4218..ad6f1c3 100755
--- a/server/autoserv
+++ b/server/autoserv
@@ -34,8 +34,8 @@
 from autotest_lib.server.cros.dynamic_suite import suite
 
 try:
-    from chromite.lib import metrics
-    from chromite.lib import cloud_trace
+    from autotest_lib.utils.frozen_chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import cloud_trace
 except ImportError:
     from autotest_lib.client.common_lib import utils as common_utils
     metrics = common_utils.metrics_mock
diff --git a/server/autotest.py b/server/autotest.py
index d195bd0..34ad588 100644
--- a/server/autotest.py
+++ b/server/autotest.py
@@ -32,7 +32,7 @@
 
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = client_utils.metrics_mock
 
diff --git a/server/control_segments/cleanup b/server/control_segments/cleanup
index 9e57a60..cbdfbe6 100644
--- a/server/control_segments/cleanup
+++ b/server/control_segments/cleanup
@@ -6,7 +6,7 @@
 from autotest_lib.server.cros import provision
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/control_segments/provision b/server/control_segments/provision
index 05f10e9..eb9cb89 100644
--- a/server/control_segments/provision
+++ b/server/control_segments/provision
@@ -11,7 +11,7 @@
 from autotest_lib.server.cros import provision
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/control_segments/reset b/server/control_segments/reset
index e54550f..230e3dd 100644
--- a/server/control_segments/reset
+++ b/server/control_segments/reset
@@ -4,7 +4,7 @@
 from autotest_lib.server.cros import provision
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/control_segments/verify b/server/control_segments/verify
index 88723d4..0128dd7 100644
--- a/server/control_segments/verify
+++ b/server/control_segments/verify
@@ -2,7 +2,7 @@
 from autotest_lib.server.cros import provision
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/crashcollect.py b/server/crashcollect.py
index cf64428..bcc0488 100644
--- a/server/crashcollect.py
+++ b/server/crashcollect.py
@@ -16,7 +16,7 @@
 from autotest_lib.server import utils
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/cros/dynamic_suite/frontend_wrappers.py b/server/cros/dynamic_suite/frontend_wrappers.py
index 4088e1a..1d2f7e7 100644
--- a/server/cros/dynamic_suite/frontend_wrappers.py
+++ b/server/cros/dynamic_suite/frontend_wrappers.py
@@ -14,17 +14,18 @@
 from autotest_lib.frontend.afe.json_rpc import proxy
 from autotest_lib.server import frontend
 try:
-    from chromite.lib import retry_util
-    from chromite.lib import timeout_util
-except ImportError:
-    logging.warn('Unable to import chromite.')
+    from autotest_lib.utils.frozen_chromite.lib import retry_util
+    from autotest_lib.utils.frozen_chromite.lib import timeout_util
+except ImportError as e:
+    logging.warn('Unable to import chromite: %s', e)
     retry_util = None
     timeout_util = None
 
 try:
-    from chromite.lib import metrics
-except ImportError:
-    logging.warn('Unable to import metrics from chromite.')
+    from autotest_lib.utils.frozen_chromite.lib import metrics
+except ImportError as e:
+    logging.warn('Unable to import metrics from '
+                 'autotest_lib.utils.frozen_chromite: %s', e)
     metrics = utils.metrics_mock
 
 
diff --git a/server/cros/dynamic_suite/reporting.py b/server/cros/dynamic_suite/reporting.py
index f0f10d8..6e3b2df 100644
--- a/server/cros/dynamic_suite/reporting.py
+++ b/server/cros/dynamic_suite/reporting.py
@@ -15,7 +15,7 @@
 from autotest_lib.site_utils import gmail_lib
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = site_utils.metrics_mock
 
diff --git a/server/cros/network/attenuator_controller.py b/server/cros/network/attenuator_controller.py
index 90d91f3..d6322d6 100644
--- a/server/cros/network/attenuator_controller.py
+++ b/server/cros/network/attenuator_controller.py
@@ -8,7 +8,7 @@
 from autotest_lib.server.cros.network import attenuator
 from autotest_lib.server.cros.network import attenuator_hosts
 
-from chromite.lib import timeout_util
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
 
 HOST_TO_FIXED_ATTENUATIONS = attenuator_hosts.HOST_FIXED_ATTENUATIONS
 
diff --git a/server/cros/provisioner.py b/server/cros/provisioner.py
index e40b247..47bb134 100644
--- a/server/cros/provisioner.py
+++ b/server/cros/provisioner.py
@@ -21,7 +21,7 @@
 from autotest_lib.server.cros.dynamic_suite import tools
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/cros/update_engine/update_engine_test.py b/server/cros/update_engine/update_engine_test.py
index 8d734f7..25aaa93 100644
--- a/server/cros/update_engine/update_engine_test.py
+++ b/server/cros/update_engine/update_engine_test.py
@@ -28,10 +28,10 @@
 from autotest_lib.server import autotest
 from autotest_lib.server import test
 from autotest_lib.server.cros.dynamic_suite import tools
-from chromite.lib import auto_updater
-from chromite.lib import auto_updater_transfer
-from chromite.lib import remote_access
-from chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import auto_updater
+from autotest_lib.utils.frozen_chromite.lib import auto_updater_transfer
+from autotest_lib.utils.frozen_chromite.lib import remote_access
+from autotest_lib.utils.frozen_chromite.lib import retry_util
 
 
 class UpdateEngineTest(test.test, update_engine_util.UpdateEngineUtil):
diff --git a/server/frontend.py b/server/frontend.py
index e331fe5..71f661f 100644
--- a/server/frontend.py
+++ b/server/frontend.py
@@ -36,7 +36,7 @@
 from six.moves import zip
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/hosts/abstract_ssh.py b/server/hosts/abstract_ssh.py
index a641b69..61b2402 100644
--- a/server/hosts/abstract_ssh.py
+++ b/server/hosts/abstract_ssh.py
@@ -25,7 +25,7 @@
 from six.moves import filter
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/hosts/cros_host.py b/server/hosts/cros_host.py
index 35035ae..4c44c21 100644
--- a/server/hosts/cros_host.py
+++ b/server/hosts/cros_host.py
@@ -52,7 +52,7 @@
 # In case cros_host is being ran via SSP on an older Moblab version with an
 # older chromite version.
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/hosts/cros_repair.py b/server/hosts/cros_repair.py
index 8956b95..02d4ab1 100644
--- a/server/hosts/cros_repair.py
+++ b/server/hosts/cros_repair.py
@@ -32,7 +32,7 @@
 from six.moves import range
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/hosts/file_store.py b/server/hosts/file_store.py
index c85c7c5..0221264 100644
--- a/server/hosts/file_store.py
+++ b/server/hosts/file_store.py
@@ -7,8 +7,8 @@
 
 import common
 from autotest_lib.server.hosts import host_info
-from chromite.lib import locking
-from chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import locking
+from autotest_lib.utils.frozen_chromite.lib import retry_util
 
 
 _FILE_LOCK_TIMEOUT_SECONDS = 5
diff --git a/server/hosts/file_store_unittest.py b/server/hosts/file_store_unittest.py
index 1e86eee..0b2e73c 100644
--- a/server/hosts/file_store_unittest.py
+++ b/server/hosts/file_store_unittest.py
@@ -11,7 +11,7 @@
 from autotest_lib.client.common_lib import autotemp
 from autotest_lib.server.hosts import file_store
 from autotest_lib.server.hosts import host_info
-from chromite.lib import locking
+from autotest_lib.utils.frozen_chromite.lib import locking
 
 class FileStoreTestCase(unittest.TestCase):
     """Test file_store.FileStore functionality."""
@@ -119,7 +119,8 @@
             store.get(force_refresh=True)
 
 
-    @mock.patch('chromite.lib.locking.FileLock', autospec=True)
+    @mock.patch('autotest_lib.utils.frozen_chromite.lib.locking.FileLock',
+                autospec=True)
     def test_commit_succeeds_after_lock_retry(self, mock_file_lock_class):
         """Tests that commit succeeds when locking requires retries.
 
@@ -139,7 +140,8 @@
         self.assertEqual(2, mock_file_lock.write_lock.call_count)
 
 
-    @mock.patch('chromite.lib.locking.FileLock', autospec=True)
+    @mock.patch('autotest_lib.utils.frozen_chromite.lib.locking.FileLock',
+                autospec=True)
     def test_refresh_succeeds_after_lock_retry(self, mock_file_lock_class):
         """Tests that refresh succeeds when locking requires retries.
 
@@ -164,7 +166,8 @@
         self.assertEqual(4, mock_file_lock.write_lock.call_count)
 
 
-    @mock.patch('chromite.lib.locking.FileLock', autospec=True)
+    @mock.patch('autotest_lib.utils.frozen_chromite.lib.locking.FileLock',
+                autospec=True)
     def test_commit_with_negative_timeout_clips(self, mock_file_lock_class):
         """Commit request with negative timeout is same as 0 timeout.
 
diff --git a/server/hosts/gce_host.py b/server/hosts/gce_host.py
index cdfe9da..448bc00 100644
--- a/server/hosts/gce_host.py
+++ b/server/hosts/gce_host.py
@@ -7,7 +7,7 @@
 import string
 
 import common
-from chromite.lib import gce
+from autotest_lib.utils.frozen_chromite.lib import gce
 
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.common_lib import lsbrelease_utils
diff --git a/server/hosts/moblab_host.py b/server/hosts/moblab_host.py
index 85f1158..b53216a 100644
--- a/server/hosts/moblab_host.py
+++ b/server/hosts/moblab_host.py
@@ -19,7 +19,7 @@
 from autotest_lib.server.hosts import cros_host
 from autotest_lib.server.hosts import cros_repair
 
-from chromite.lib import timeout_util
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
 import six
 
 AUTOTEST_INSTALL_DIR = global_config.global_config.get_config_value(
diff --git a/server/hosts/servo_host.py b/server/hosts/servo_host.py
index 13f202d..8e873d1 100644
--- a/server/hosts/servo_host.py
+++ b/server/hosts/servo_host.py
@@ -41,7 +41,7 @@
 from autotest_lib.server.cros.servo.topology import servo_topology
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/hosts/servo_repair.py b/server/hosts/servo_repair.py
index e9db77c..b6db8d4 100644
--- a/server/hosts/servo_repair.py
+++ b/server/hosts/servo_repair.py
@@ -27,7 +27,7 @@
 import six
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/hosts/shadowing_store.py b/server/hosts/shadowing_store.py
index 9e53daa..be43081 100644
--- a/server/hosts/shadowing_store.py
+++ b/server/hosts/shadowing_store.py
@@ -9,7 +9,7 @@
 
 import common
 from autotest_lib.server.hosts import host_info
-from chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import metrics
 
 
 _METRICS_PREFIX = 'chromeos/autotest/autoserv/host_info/shadowing_store/'
diff --git a/server/hosts/ssh_host.py b/server/hosts/ssh_host.py
index 1737e0e..1e30c54 100644
--- a/server/hosts/ssh_host.py
+++ b/server/hosts/ssh_host.py
@@ -30,7 +30,7 @@
 # In case cros_host is being ran via SSP on an older Moblab version with an
 # older chromite version.
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/server_job.py b/server/server_job.py
index 25488dd..f82a179 100644
--- a/server/server_job.py
+++ b/server/server_job.py
@@ -64,7 +64,7 @@
 from six.moves import zip
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/site_crashcollect.py b/server/site_crashcollect.py
index b4ac6a0..6f0fa76 100644
--- a/server/site_crashcollect.py
+++ b/server/site_crashcollect.py
@@ -15,7 +15,7 @@
 from autotest_lib.server import utils
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = client_utils.metrics_mock
 
diff --git a/server/site_tests/autoupdate_P2P/autoupdate_P2P.py b/server/site_tests/autoupdate_P2P/autoupdate_P2P.py
index 536ccb1..efe53e4 100644
--- a/server/site_tests/autoupdate_P2P/autoupdate_P2P.py
+++ b/server/site_tests/autoupdate_P2P/autoupdate_P2P.py
@@ -10,7 +10,7 @@
 from autotest_lib.client.common_lib import utils
 from autotest_lib.server.cros.dynamic_suite import tools
 from autotest_lib.server.cros.update_engine import update_engine_test
-from chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import retry_util
 
 class autoupdate_P2P(update_engine_test.UpdateEngineTest):
     """Tests a peer to peer (P2P) autoupdate."""
diff --git a/server/site_tests/provision_QuickProvision/provision_QuickProvision.py b/server/site_tests/provision_QuickProvision/provision_QuickProvision.py
index 2a11ac5..e2dc385 100644
--- a/server/site_tests/provision_QuickProvision/provision_QuickProvision.py
+++ b/server/site_tests/provision_QuickProvision/provision_QuickProvision.py
@@ -17,7 +17,7 @@
 from autotest_lib.server.cros import provisioner
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/server/site_utils.py b/server/site_utils.py
index 1ea764c..43a14b0 100644
--- a/server/site_utils.py
+++ b/server/site_utils.py
@@ -39,7 +39,7 @@
 from autotest_lib.server.cros.dynamic_suite import job_status
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
@@ -723,9 +723,9 @@
         # 1-2 seconds to the module import time and most users of site_utils
         # don't need it. The correct fix is to break apart site_utils into more
         # meaningful chunks.
-        from chromite.lib import ts_mon_config
-    except ImportError:
-        logging.warn('Unable to import chromite. Monarch is disabled.')
+        from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
+    except ImportError as e:
+        logging.warn('Unable to import chromite. Monarch is disabled: %s', e)
         return TrivialContextManager()
 
     try:
diff --git a/site_utils/admin_audit/servo_updater.py b/site_utils/admin_audit/servo_updater.py
index f396557..7f707b9 100644
--- a/site_utils/admin_audit/servo_updater.py
+++ b/site_utils/admin_audit/servo_updater.py
@@ -10,7 +10,7 @@
 from autotest_lib.client.common_lib import utils as client_utils
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = client_utils.metrics_mock
 
diff --git a/site_utils/admin_audit/verifiers.py b/site_utils/admin_audit/verifiers.py
index ad4da9c..4313b10 100644
--- a/site_utils/admin_audit/verifiers.py
+++ b/site_utils/admin_audit/verifiers.py
@@ -20,7 +20,7 @@
 from autotest_lib.site_utils.admin_audit import rpm_validator
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = client_utils.metrics_mock
 
diff --git a/site_utils/backup_mysql_db.py b/site_utils/backup_mysql_db.py
index a90c219..281c276 100755
--- a/site_utils/backup_mysql_db.py
+++ b/site_utils/backup_mysql_db.py
@@ -32,8 +32,8 @@
 from autotest_lib.client.common_lib import global_config
 from autotest_lib.client.common_lib import utils
 
-from chromite.lib import metrics
-from chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 from six.moves import range
 
 _ATTEMPTS = 3
diff --git a/site_utils/balance_pools.py b/site_utils/balance_pools.py
index a5db6f9..f09a138 100755
--- a/site_utils/balance_pools.py
+++ b/site_utils/balance_pools.py
@@ -68,8 +68,8 @@
 from autotest_lib.server.lib import status_history
 from autotest_lib.site_utils import lab_inventory
 from autotest_lib.utils import labellib
-from chromite.lib import metrics
-from chromite.lib import parallel
+from autotest_lib.utils.frozen_chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import parallel
 
 #This must be imported after chromite.lib.metrics
 from infra_libs import ts_mon
diff --git a/site_utils/check_hung_proc.py b/site_utils/check_hung_proc.py
index 268dcba..8e14bb5 100755
--- a/site_utils/check_hung_proc.py
+++ b/site_utils/check_hung_proc.py
@@ -22,7 +22,7 @@
 from autotest_lib.server import site_utils
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = site_utils.metrics_mock
 
diff --git a/site_utils/cleanup_tko_db.py b/site_utils/cleanup_tko_db.py
index e8698de..4ce80b8 100755
--- a/site_utils/cleanup_tko_db.py
+++ b/site_utils/cleanup_tko_db.py
@@ -16,8 +16,8 @@
 from autotest_lib.client.common_lib import global_config
 from autotest_lib.client.common_lib import logging_config
 
-from chromite.lib import metrics
-from chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 
 
 CONFIG = global_config.global_config
diff --git a/site_utils/count_jobs.py b/site_utils/count_jobs.py
index 798f431..299e464 100755
--- a/site_utils/count_jobs.py
+++ b/site_utils/count_jobs.py
@@ -16,7 +16,7 @@
 from autotest_lib.server import site_utils
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = site_utils.metrics_mock
 
diff --git a/site_utils/dump_suite_report.py b/site_utils/dump_suite_report.py
index 1e9b30f..8cf62f8 100755
--- a/site_utils/dump_suite_report.py
+++ b/site_utils/dump_suite_report.py
@@ -14,7 +14,7 @@
 
 from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
 from autotest_lib.server.lib import suite_report
-from chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 
 def GetParser():
     """Creates the argparse parser."""
diff --git a/site_utils/gmail_lib.py b/site_utils/gmail_lib.py
index f5df276..6b8c0a7 100755
--- a/site_utils/gmail_lib.py
+++ b/site_utils/gmail_lib.py
@@ -45,10 +45,10 @@
 # Note: These imports needs to come after the apiclient imports, because
 # of a sys.path war between chromite and autotest crbug.com/622988
 from autotest_lib.server import utils as server_utils
-from chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import retry_util
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/site_utils/gs_offloader.py b/site_utils/gs_offloader.py
index f4c77ff..d7318fb 100755
--- a/site_utils/gs_offloader.py
+++ b/site_utils/gs_offloader.py
@@ -51,7 +51,7 @@
 from autotest_lib.utils import labellib
 from autotest_lib.utils import gslib
 from autotest_lib.utils.side_effects import config_loader
-from chromite.lib import timeout_util
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
 
 # Autotest requires the psutil module from site-packages, so it must be imported
 # after "import common".
@@ -61,11 +61,11 @@
 except ImportError:
     psutil = None
 
-from chromite.lib import parallel
+from autotest_lib.utils.frozen_chromite.lib import parallel
 import six
 try:
-    from chromite.lib import metrics
-    from chromite.lib import ts_mon_config
+    from autotest_lib.utils.frozen_chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 except ImportError:
     metrics = utils.metrics_mock
     ts_mon_config = utils.metrics_mock
diff --git a/site_utils/gs_offloader_unittest.py b/site_utils/gs_offloader_unittest.py
index 92484af..6e41f04 100755
--- a/site_utils/gs_offloader_unittest.py
+++ b/site_utils/gs_offloader_unittest.py
@@ -39,7 +39,7 @@
 from autotest_lib.tko import models
 from autotest_lib.utils import gslib
 from autotest_lib.site_utils import pubsub_utils
-from chromite.lib import timeout_util
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
 from six.moves import range
 
 # Test value to use for `days_old`, if nothing else is required.
diff --git a/site_utils/job_directories.py b/site_utils/job_directories.py
index c1b7f33..e66df66 100755
--- a/site_utils/job_directories.py
+++ b/site_utils/job_directories.py
@@ -16,7 +16,7 @@
 from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/site_utils/kill_slow_queries.py b/site_utils/kill_slow_queries.py
index 29872e9..d098551 100755
--- a/site_utils/kill_slow_queries.py
+++ b/site_utils/kill_slow_queries.py
@@ -17,8 +17,8 @@
 from autotest_lib.site_utils.stats import mysql_stats
 
 try:
-    from chromite.lib import metrics
-    from chromite.lib import ts_mon_config
+    from autotest_lib.utils.frozen_chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 except ImportError:
     metrics = utils.metrics_mock
     ts_mon_config = utils.metrics_mock
diff --git a/site_utils/label_cleaner.py b/site_utils/label_cleaner.py
index 2b9e277..9537c3e 100755
--- a/site_utils/label_cleaner.py
+++ b/site_utils/label_cleaner.py
@@ -34,8 +34,8 @@
 from autotest_lib.client.common_lib import global_config
 from autotest_lib.client.common_lib import logging_config
 from autotest_lib.server import frontend
-from chromite.lib import metrics
-from chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 
 
 _METRICS_PREFIX = 'chromeos/autotest/afe_db/admin/label_cleaner'
diff --git a/site_utils/lxc/container.py b/site_utils/lxc/container.py
index dcdbd5a..f43f514 100644
--- a/site_utils/lxc/container.py
+++ b/site_utils/lxc/container.py
@@ -18,7 +18,7 @@
 from autotest_lib.site_utils.lxc import utils as lxc_utils
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/site_utils/lxc/container_bucket.py b/site_utils/lxc/container_bucket.py
index 34109ec..d73b852 100644
--- a/site_utils/lxc/container_bucket.py
+++ b/site_utils/lxc/container_bucket.py
@@ -22,7 +22,7 @@
 from autotest_lib.site_utils.lxc.container_factory import ContainerFactory
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
     from infra_libs import ts_mon
 except ImportError:
     import mock
diff --git a/site_utils/lxc/container_factory.py b/site_utils/lxc/container_factory.py
index e235c60..409f778 100644
--- a/site_utils/lxc/container_factory.py
+++ b/site_utils/lxc/container_factory.py
@@ -11,7 +11,7 @@
 from autotest_lib.site_utils.lxc import container
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/site_utils/lxc/lxc.py b/site_utils/lxc/lxc.py
index b53aab7..2c749c4 100644
--- a/site_utils/lxc/lxc.py
+++ b/site_utils/lxc/lxc.py
@@ -15,7 +15,7 @@
 from autotest_lib.site_utils.lxc import constants
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = common_utils.metrics_mock
 
diff --git a/site_utils/lxc/lxc_functional_test.py b/site_utils/lxc/lxc_functional_test.py
index 089200f..a4aef39 100755
--- a/site_utils/lxc/lxc_functional_test.py
+++ b/site_utils/lxc/lxc_functional_test.py
@@ -79,7 +79,7 @@
 
 TEST_SCRIPT_CONTENT_TS_MON = """
 # Test ts_mon metrics can be set up.
-from chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 ts_mon_config.SetupTsMonGlobalState('some_test', suppress_exception=False)
 """
 
diff --git a/site_utils/pubsub_utils_unittest.py b/site_utils/pubsub_utils_unittest.py
index c052a9f..3bfde6c 100644
--- a/site_utils/pubsub_utils_unittest.py
+++ b/site_utils/pubsub_utils_unittest.py
@@ -12,7 +12,7 @@
 import mox
 
 # TODO(crbug.com/1050892): The unittests rely on apiclient in chromite.
-import chromite  # pylint: disable=unused-import
+import autotest_lib.utils.frozen_chromite  # pylint: disable=unused-import
 
 from apiclient import discovery
 from oauth2client.client import ApplicationDefaultCredentialsError
diff --git a/site_utils/rpc_flight_recorder.py b/site_utils/rpc_flight_recorder.py
index f86f980..0d96443 100755
--- a/site_utils/rpc_flight_recorder.py
+++ b/site_utils/rpc_flight_recorder.py
@@ -20,8 +20,8 @@
 # pylint: disable=unused-import
 from autotest_lib.server import site_host_attributes
 from autotest_lib.site_utils import server_manager_utils
-from chromite.lib import metrics
-from chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 
 METRIC_ROOT = 'chromeos/autotest/blackbox/afe_rpc'
 METRIC_RPC_CALL_DURATIONS = METRIC_ROOT + '/rpc_call_durations'
diff --git a/site_utils/rpm_control_system/rpm_client.py b/site_utils/rpm_control_system/rpm_client.py
index 548360b..52aa721 100755
--- a/site_utils/rpm_control_system/rpm_client.py
+++ b/site_utils/rpm_control_system/rpm_client.py
@@ -15,7 +15,7 @@
 from autotest_lib.client.common_lib.cros import retry
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     from autotest_lib.client.bin.utils import metrics_mock as metrics
 
diff --git a/site_utils/stats/apache_access_log_metrics.py b/site_utils/stats/apache_access_log_metrics.py
index fcf9c37..08df917 100755
--- a/site_utils/stats/apache_access_log_metrics.py
+++ b/site_utils/stats/apache_access_log_metrics.py
@@ -15,8 +15,8 @@
 
 import common
 
-from chromite.lib import ts_mon_config
-from chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import metrics
 
 from autotest_lib.site_utils.stats import log_daemon_common
 # Not used, but needed for importing rpc_interface.
diff --git a/site_utils/stats/apache_error_log_metrics.py b/site_utils/stats/apache_error_log_metrics.py
index 3617ad9..3b512d1 100755
--- a/site_utils/stats/apache_error_log_metrics.py
+++ b/site_utils/stats/apache_error_log_metrics.py
@@ -17,8 +17,8 @@
 
 import common
 
-from chromite.lib import metrics
-from chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 # infra_libs comes from chromite's third_party modules.
 from infra_libs import ts_mon
 
diff --git a/site_utils/stats/mysql_stats.py b/site_utils/stats/mysql_stats.py
index 8c9e766..5e0aa91 100755
--- a/site_utils/stats/mysql_stats.py
+++ b/site_utils/stats/mysql_stats.py
@@ -21,8 +21,8 @@
 from autotest_lib.client.common_lib import global_config
 from autotest_lib.client.common_lib.cros import retry
 
-from chromite.lib import metrics
-from chromite.lib import ts_mon_config
+from autotest_lib.utils.frozen_chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import ts_mon_config
 
 AT_DIR='/usr/local/autotest'
 DEFAULT_USER = global_config.global_config.get_config_value(
diff --git a/tko/db.py b/tko/db.py
index a21ece3..aa16ca5 100644
--- a/tko/db.py
+++ b/tko/db.py
@@ -35,7 +35,7 @@
 import six
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/tko/parse.py b/tko/parse.py
index abd1153..586070b 100755
--- a/tko/parse.py
+++ b/tko/parse.py
@@ -36,7 +36,7 @@
 import six
 
 try:
-    from chromite.lib import metrics
+    from autotest_lib.utils.frozen_chromite.lib import metrics
 except ImportError:
     metrics = utils.metrics_mock
 
diff --git a/utils/frozen_chromite/README.md b/utils/frozen_chromite/README.md
new file mode 100644
index 0000000..17cb7df
--- /dev/null
+++ b/utils/frozen_chromite/README.md
@@ -0,0 +1,5 @@
+This is a fork of chromite code used by autotest.
+Since autotest is stuck on Python 2 and is holding back chromite,
+we've pulled out the code that autotest cares about here.
+If/when autotest supports Python 3.6+ only, it can migrate back
+to using chromite directly.
diff --git a/utils/frozen_chromite/__init__.py b/utils/frozen_chromite/__init__.py
new file mode 100644
index 0000000..96b9d61
--- /dev/null
+++ b/utils/frozen_chromite/__init__.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                                'third_party'))
diff --git a/utils/frozen_chromite/cli/__init__.py b/utils/frozen_chromite/cli/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/utils/frozen_chromite/cli/__init__.py
diff --git a/utils/frozen_chromite/cli/command.py b/utils/frozen_chromite/cli/command.py
new file mode 100644
index 0000000..4f84af4
--- /dev/null
+++ b/utils/frozen_chromite/cli/command.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module that contains meta-logic related to CLI commands.
+
+This module contains two important definitions used by all commands:
+  CliCommand: The parent class of all CLI commands.
+  CommandDecorator: Decorator that must be used to ensure that the command shows
+    up in |_commands| and is discoverable.
+
+Commands can be either imported directly or looked up using this module's
+ListCommands() function.
+"""
+
+from __future__ import print_function
+
+import importlib
+import os
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import commandline
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+
+
+# Paths for finding and importing subcommand modules.
+_SUBCOMMAND_MODULE_DIRECTORY = os.path.join(os.path.dirname(__file__), 'cros')
+_SUBCOMMAND_MODULE_PREFIX = 'cros_'
+
+
+_commands = dict()
+
+
+def UseProgressBar():
+  """Determine whether the progress bar is to be used or not.
+
+  We only want the progress bar to display for the brillo commands which operate
+  at logging level NOTICE. If the user wants to see the noisy output, then they
+  can execute the command at logging level INFO or DEBUG.
+  """
+  return logging.getLogger().getEffectiveLevel() == logging.NOTICE
+
+
+def ImportCommand(name):
+  """Directly import the specified subcommand.
+
+  This method imports the module which must contain the single subcommand.  When
+  the module is loaded, the declared command (those that use CommandDecorator)
+  will automatically get added to |_commands|.
+
+  Args:
+    name: The subcommand to load.
+
+  Returns:
+    A reference to the subcommand class.
+  """
+  module_path = os.path.join(_SUBCOMMAND_MODULE_DIRECTORY,
+                             'cros_%s' % (name.replace('-', '_'),))
+  import_path = os.path.relpath(os.path.realpath(module_path),
+                                os.path.dirname(constants.CHROMITE_DIR))
+  module_parts = import_path.split(os.path.sep)
+  importlib.import_module('.'.join(module_parts))
+  return _commands[name]
+
+
+def ListCommands():
+  """Return the set of available subcommands.
+
+  We assume that there is a direct one-to-one relationship between the module
+  name on disk and the command that module implements.  We assume this as a
+  performance requirement (to avoid importing every subcommand every time even
+  though we'd only ever run a single one), and to avoid 3rd party module usage
+  in one subcommand breaking all other subcommands (not a great solution).
+  """
+  # Filenames use underscores due to python naming limitations, but subcommands
+  # use dashes as they're easier for humans to type.
+  # Strip off the leading "cros_" and the trailing ".py".
+  return set(x[5:-3].replace('_', '-')
+             for x in os.listdir(_SUBCOMMAND_MODULE_DIRECTORY)
+             if (x.startswith(_SUBCOMMAND_MODULE_PREFIX) and x.endswith('.py')
+                 and not x.endswith('_unittest.py')))
+
+
+class InvalidCommandError(Exception):
+  """Error that occurs when command class fails sanity checks."""
+
+
+def CommandDecorator(command_name):
+  """Decorator that sanity checks and adds class to list of usable commands."""
+
+  def InnerCommandDecorator(original_class):
+    """Inner Decorator that actually wraps the class."""
+    if not hasattr(original_class, '__doc__'):
+      raise InvalidCommandError('All handlers must have docstrings: %s' %
+                                original_class)
+
+    if not issubclass(original_class, CliCommand):
+      raise InvalidCommandError('All Commands must derive from CliCommand: %s' %
+                                original_class)
+
+    _commands[command_name] = original_class
+    original_class.command_name = command_name
+
+    return original_class
+
+  return InnerCommandDecorator
+
+
+class CliCommand(object):
+  """All CLI commands must derive from this class.
+
+  This class provides the abstract interface for all CLI commands. When
+  designing a new command, you must sub-class from this class and use the
+  CommandDecorator decorator. You must specify a class docstring as that will be
+  used as the usage for the sub-command.
+
+  In addition your command should implement AddParser which is passed in a
+  parser that you can add your own custom arguments. See argparse for more
+  information.
+  """
+  # Indicates whether command uses cache related commandline options.
+  use_caching_options = False
+
+  def __init__(self, options):
+    self.options = options
+
+  @classmethod
+  def AddParser(cls, parser):
+    """Add arguments for this command to the parser."""
+    parser.set_defaults(command_class=cls)
+
+  @classmethod
+  def AddDeviceArgument(cls, parser, schemes=commandline.DEVICE_SCHEME_SSH,
+                        positional=False):
+    """Add a device argument to the parser.
+
+    This standardizes the help message across all subcommands.
+
+    Args:
+      parser: The parser to add the device argument to.
+      schemes: List of device schemes or single scheme to allow.
+      positional: Whether it should be a positional or named argument.
+    """
+    help_strings = []
+    schemes = list(cros_build_lib.iflatten_instance(schemes))
+    if commandline.DEVICE_SCHEME_SSH in schemes:
+      help_strings.append('Target a device with [user@]hostname[:port]. '
+                          'IPv4/IPv6 addresses are allowed, but IPv6 must '
+                          'use brackets (e.g. [::1]).')
+    if commandline.DEVICE_SCHEME_USB in schemes:
+      help_strings.append('Target removable media with usb://[path].')
+    if commandline.DEVICE_SCHEME_SERVO in schemes:
+      help_strings.append('Target a servo by port or serial number with '
+                          'servo:port[:port] or servo:serial:serial-number. '
+                          'e.g. servo:port:1234 or servo:serial:C1230024192.')
+    if commandline.DEVICE_SCHEME_FILE in schemes:
+      help_strings.append('Target a local file with file://path.')
+    if positional:
+      parser.add_argument('device',
+                          type=commandline.DeviceParser(schemes),
+                          help=' '.join(help_strings))
+    else:
+      parser.add_argument('-d', '--device',
+                          type=commandline.DeviceParser(schemes),
+                          help=' '.join(help_strings))
+
+  def Run(self):
+    """The command to run."""
+    raise NotImplementedError()
diff --git a/utils/frozen_chromite/lib/__init__.py b/utils/frozen_chromite/lib/__init__.py
new file mode 100644
index 0000000..4143aec
--- /dev/null
+++ b/utils/frozen_chromite/lib/__init__.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import sys
+
+
+# This is to work around a Python bug:  The first call to
+# datetime.datetime.strptime() within the Python VM can fail if it
+# happens in a multi-threaded context.  To work around that, we force a
+# "safe" call here.  For more details, see:
+#     https://bugs.python.org/issue7980
+#     https://crbug.com/710182
+if sys.version_info.major < 3:
+  import datetime
+  datetime.datetime.strptime(datetime.datetime.now().strftime('%Y'), '%Y')
diff --git a/utils/frozen_chromite/lib/auth.py b/utils/frozen_chromite/lib/auth.py
new file mode 100644
index 0000000..1cf08a1
--- /dev/null
+++ b/utils/frozen_chromite/lib/auth.py
@@ -0,0 +1,267 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Functions for authenticating httplib2 requests with OAuth2 tokens."""
+
+from __future__ import print_function
+
+import os
+
+import httplib2
+
+from autotest_lib.utils.frozen_chromite.lib import cipd
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import path_util
+
+
+REFRESH_STATUS_CODES = [401]
+
+# Retry times on get_access_token
+RETRY_GET_ACCESS_TOKEN = 3
+
+
+class AccessTokenError(Exception):
+  """Error accessing the token."""
+
+
+def _GetCipdBinary(pkg_name, bin_name, instance_id):
+  """Returns a local path to the given binary fetched from cipd."""
+  cache_dir = os.path.join(path_util.GetCacheDir(), 'cipd', 'packages')
+  path = cipd.InstallPackage(
+      cipd.GetCIPDFromCache(),
+      pkg_name,
+      instance_id,
+      destination=cache_dir)
+
+  return os.path.join(path, bin_name)
+
+
+# crbug:871831 default to last sha1 version.
+def GetLuciAuth(
+    instance_id='git_revision:fd059ace316e4dbcaa5afdcec9ed4a855c4f3c65'):
+  """Returns a path to the luci-auth binary.
+
+  This will download and install the luci-auth package if it is not already
+  deployed.
+
+  Args:
+    instance_id: The instance-id of the package to install.
+
+  Returns:
+    the path to the luci-auth binary.
+  """
+  return _GetCipdBinary(
+      'infra/tools/luci-auth/linux-amd64',
+      'luci-auth',
+      instance_id)
+
+
+# crbug:871831 default to last sha1 version.
+def GetLuciGitCreds(
+    instance_id='git_revision:fd059ace316e4dbcaa5afdcec9ed4a855c4f3c65'):
+  """Returns a path to the git-credential-luci binary.
+
+  This will download and install the git-credential-luci package if it is not
+  already deployed.
+
+  Args:
+    instance_id: The instance-id of the package to install.
+
+  Returns:
+    the path to the git-credential-luci binary.
+  """
+  return _GetCipdBinary(
+      'infra/tools/luci/git-credential-luci/linux-amd64',
+      'git-credential-luci',
+      instance_id)
+
+
+def Login(service_account_json=None):
+  """Logs a user into chrome-infra-auth using luci-auth.
+
+  Runs 'luci-auth login' to get a OAuth2 refresh token.
+
+  Args:
+    service_account_json: A optional path to a service account.
+
+  Raises:
+    AccessTokenError if login command failed.
+  """
+  logging.info('Logging into chrome-infra-auth with service_account %s',
+               service_account_json)
+
+  cmd = [GetLuciAuth(), 'login']
+  if service_account_json and os.path.isfile(service_account_json):
+    cmd += ['-service-account-json=%s' % service_account_json]
+
+  result = cros_build_lib.run(
+      cmd,
+      print_cmd=True,
+      check=False)
+
+  if result.returncode:
+    raise AccessTokenError('Failed at  logging in to chrome-infra-auth: %s,'
+                           ' may retry.')
+
+
+def Token(service_account_json=None):
+  """Get the token using luci-auth.
+
+  Runs 'luci-auth token' to get the OAuth2 token.
+
+  Args:
+    service_account_json: A optional path to a service account.
+
+  Returns:
+    The token string if the command succeeded;
+
+  Raises:
+    AccessTokenError if token command failed.
+  """
+  cmd = [GetLuciAuth(), 'token']
+  if service_account_json and os.path.isfile(service_account_json):
+    cmd += ['-service-account-json=%s' % service_account_json]
+
+  result = cros_build_lib.run(
+      cmd,
+      print_cmd=False,
+      capture_output=True,
+      check=False,
+      encoding='utf-8')
+
+  if result.returncode:
+    raise AccessTokenError('Failed at getting the access token, may retry.')
+
+  return result.output.strip()
+
+
+def _TokenAndLoginIfNeed(service_account_json=None, force_token_renew=False):
+  """Run Token and Login opertions.
+
+  If force_token_renew is on, run Login operation first to force token renew,
+  then run Token operation to return token string.
+  If force_token_renew is off, run Token operation first. If no token found,
+  run Login operation to refresh the token. Throw an AccessTokenError after
+  running the Login operation, so that GetAccessToken can retry on
+  _TokenAndLoginIfNeed.
+
+  Args:
+    service_account_json: A optional path to a service account.
+    force_token_renew: Boolean indicating whether to force login to renew token
+      before returning a token. Default to False.
+
+  Returns:
+    The token string if the command succeeded; else, None.
+
+  Raises:
+    AccessTokenError if the Token operation failed.
+  """
+  if force_token_renew:
+    Login(service_account_json=service_account_json)
+    return Token(service_account_json=service_account_json)
+  else:
+    try:
+      return Token(service_account_json=service_account_json)
+    except AccessTokenError as e:
+      Login(service_account_json=service_account_json)
+      # Raise the error and let the caller decide whether to retry
+      raise e
+
+
+def GetAccessToken(**kwargs):
+  """Returns an OAuth2 access token using luci-auth.
+
+  Retry the _TokenAndLoginIfNeed function when the error thrown is an
+  AccessTokenError.
+
+  Args:
+    kwargs: A list of keyword arguments to pass to _TokenAndLoginIfNeed.
+
+  Returns:
+    The access token string or None if failed to get access token.
+  """
+  service_account_json = kwargs.get('service_account_json')
+  force_token_renew = kwargs.get('force_token_renew', False)
+  retry = lambda e: isinstance(e, AccessTokenError)
+  try:
+    result = retry_util.GenericRetry(
+        retry, RETRY_GET_ACCESS_TOKEN,
+        _TokenAndLoginIfNeed,
+        service_account_json=service_account_json,
+        force_token_renew=force_token_renew,
+        sleep=3)
+    return result
+  except AccessTokenError as e:
+    logging.error('Failed at getting the access token: %s ', e)
+    # Do not raise the AccessTokenError here.
+    # Let the response returned by the request handler
+    # tell the status and errors.
+    return
+
+
+def GitCreds(service_account_json=None):
+  """Get the git credential using git-credential-luci.
+
+  Args:
+    service_account_json: A optional path to a service account.
+
+  Returns:
+    The git credential if the command succeeded;
+
+  Raises:
+    AccessTokenError if token command failed.
+  """
+  cmd = [GetLuciGitCreds(), 'get']
+  if service_account_json and os.path.isfile(service_account_json):
+    cmd += ['-service-account-json=%s' % service_account_json]
+
+  result = cros_build_lib.run(
+      cmd,
+      print_cmd=False,
+      capture_output=True,
+      check=False,
+      encoding='utf-8')
+
+  if result.returncode:
+    raise AccessTokenError('Unable to fetch git credential.')
+
+  for line in result.stdout.splitlines():
+    if line.startswith('password='):
+      return line.split('password=')[1].strip()
+
+  raise AccessTokenError('Unable to fetch git credential.')
+
+
+class AuthorizedHttp(object):
+  """Authorized http instance"""
+
+  def __init__(self, get_access_token, http, **kwargs):
+    self.get_access_token = get_access_token
+    self.http = http if http is not None else httplib2.Http()
+    self.token = self.get_access_token(**kwargs)
+    self.kwargs = kwargs
+
+  # Adapted from oauth2client.OAuth2Credentials.authorize.
+  # We can't use oauthclient2 because the import will fail on slaves due to
+  # missing PyOpenSSL (crbug.com/498467).
+  def request(self, *args, **kwargs):
+    headers = kwargs.get('headers', {}).copy()
+    headers['Authorization'] = 'Bearer %s' % self.token
+    kwargs['headers'] = headers
+
+    resp, content = self.http.request(*args, **kwargs)
+    if resp.status in REFRESH_STATUS_CODES:
+      logging.info('OAuth token TTL expired, auto-refreshing')
+
+      # Token expired, force token renew
+      kwargs_copy = dict(self.kwargs, force_token_renew=True)
+      self.token = self.get_access_token(**kwargs_copy)
+
+      # TODO(phobbs): delete the "access_token" key from the token file used.
+      headers['Authorization'] = 'Bearer %s' % self.token
+      resp, content = self.http.request(*args, **kwargs)
+
+    return resp, content
diff --git a/utils/frozen_chromite/lib/auto_update_util.py b/utils/frozen_chromite/lib/auto_update_util.py
new file mode 100644
index 0000000..dc7b0af
--- /dev/null
+++ b/utils/frozen_chromite/lib/auto_update_util.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This file contains util functions for the auto-update lib."""
+
+from __future__ import print_function
+
+import re
+
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+
+
+LSB_RELEASE = '/etc/lsb-release'
+
+
+def GetChromeosBuildInfo(lsb_release_content=None, regex=None):
+  """Get chromeos build info in device under test as string. None on fail.
+
+  Args:
+    lsb_release_content: A string represents the content of lsb-release.
+        If the caller is from drone, it can pass in the file content here.
+    regex: A regular expression, refers to which line this func tries to fetch
+        from lsb_release_content.
+
+  Returns:
+    A kind of chromeos build info in device under test as string. None on fail.
+  """
+  if not lsb_release_content or not regex:
+    return None
+
+  for line in lsb_release_content.split('\n'):
+    m = re.match(regex, line)
+    if m:
+      return m.group(1)
+
+  return None
+
+def VersionMatch(build_version, release_version):
+  """Compare release version from lsb-release with cros-version label.
+
+  build_version is a string based on build name. It is prefixed with builder
+  info and branch ID, e.g., lumpy-release/R43-6809.0.0.
+  release_version is retrieved from lsb-release.
+  These two values might not match exactly.
+
+  The method is designed to compare version for following 6 scenarios with
+  samples of build version and expected release version:
+  1. trybot non-release build (paladin, pre-cq or test-ap build).
+  build version:   trybot-lumpy-paladin/R27-3837.0.0-b123
+  release version: 3837.0.2013_03_21_1340
+
+  2. trybot release build.
+  build version:   trybot-lumpy-release/R27-3837.0.0-b456
+  release version: 3837.0.0
+
+  3. buildbot official release build.
+  build version:   lumpy-release/R27-3837.0.0
+  release version: 3837.0.0
+
+  4. non-official paladin rc build.
+  build version:   lumpy-paladin/R27-3878.0.0-rc7
+  release version: 3837.0.0-rc7
+
+  5. chrome-perf build.
+  build version:   lumpy-chrome-perf/R28-3837.0.0-b2996
+  release version: 3837.0.0
+
+  6. pgo-generate build.
+  build version:   lumpy-release-pgo-generate/R28-3837.0.0-b2996
+  release version: 3837.0.0-pgo-generate
+
+  TODO: This logic has a bug if a trybot paladin build failed to be
+  installed in a DUT running an older trybot paladin build with same
+  platform number, but different build number (-b###). So to conclusively
+  determine if a tryjob paladin build is imaged successfully, we may need
+  to find out the date string from update url.
+
+  Args:
+    build_version: Build name for cros version, e.g.
+        peppy-release/R43-6809.0.0 or R43-6809.0.0
+    release_version: Release version retrieved from lsb-release,
+        e.g., 6809.0.0
+
+  Returns:
+    True if the values match, otherwise returns False.
+  """
+  # If the build is from release, CQ or PFQ builder, cros-version label must
+  # be ended with release version in lsb-release.
+
+  if build_version.endswith(release_version):
+    return True
+
+  # Remove R#- and -b# at the end of build version
+  stripped_version = re.sub(r'(R\d+-|-b\d+)', '', build_version)
+  # Trim the builder info, e.g., trybot-lumpy-paladin/
+  stripped_version = stripped_version.split('/')[-1]
+
+  # Add toolchain here since is_trybot_non_release_build cannot detect build
+  # like 'trybot-sentry-llvm-toolchain/R56-8885.0.0-b943'.
+  is_trybot_non_release_build = re.match(
+      r'.*trybot-.+-(paladin|pre-cq|test-ap|toolchain)', build_version)
+
+  # Replace date string with 0 in release_version
+  release_version_no_date = re.sub(r'\d{4}_\d{2}_\d{2}_\d+', '0',
+                                   release_version)
+  has_date_string = release_version != release_version_no_date
+
+  is_pgo_generate_build = re.match(r'.+-pgo-generate', build_version)
+
+  # Remove |-pgo-generate| in release_version
+  release_version_no_pgo = release_version.replace('-pgo-generate', '')
+  has_pgo_generate = release_version != release_version_no_pgo
+
+  if is_trybot_non_release_build:
+    if not has_date_string:
+      logging.error('A trybot paladin or pre-cq build is expected. '
+                    'Version "%s" is not a paladin or pre-cq  build.',
+                    release_version)
+      return False
+    return stripped_version == release_version_no_date
+  elif is_pgo_generate_build:
+    if not has_pgo_generate:
+      logging.error('A pgo-generate build is expected. Version '
+                    '"%s" is not a pgo-generate build.',
+                    release_version)
+      return False
+    return stripped_version == release_version_no_pgo
+  else:
+    if has_date_string:
+      logging.error('Unexpected date found in a non trybot paladin or '
+                    'pre-cq build.')
+      return False
+    # Versioned build, i.e., rc or release build.
+    return stripped_version == release_version
diff --git a/utils/frozen_chromite/lib/auto_updater.py b/utils/frozen_chromite/lib/auto_updater.py
new file mode 100644
index 0000000..2eb143b
--- /dev/null
+++ b/utils/frozen_chromite/lib/auto_updater.py
@@ -0,0 +1,1034 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Library containing functions to execute auto-update on a remote device.
+
+ChromiumOSUpdater includes:
+  ----Check-----
+  * Check functions, including kernel/version/cgpt check.
+
+  ----Precheck---
+  * Pre-check if the device can run nebraska.
+  * Pre-check for stateful/rootfs update/whole update.
+
+  ----Transfer----
+  * This step is carried out by Transfer subclasses in
+    auto_updater_transfer.py.
+
+  ----Auto-Update---
+  * Do rootfs partition update if it's required.
+  * Do stateful partition update if it's required.
+  * Do reboot for device if it's required.
+
+  ----Verify----
+  * Do verification if it's required.
+  * Disable rootfs verification in device if it's required.
+  * Post-check stateful/rootfs update/whole update.
+"""
+
+from __future__ import print_function
+
+import json
+import os
+import re
+import subprocess
+import tempfile
+import time
+
+import six
+
+from autotest_lib.utils.frozen_chromite.cli import command
+from autotest_lib.utils.frozen_chromite.lib import auto_update_util
+from autotest_lib.utils.frozen_chromite.lib import auto_updater_transfer
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import nebraska_wrapper
+from autotest_lib.utils.frozen_chromite.lib import operation
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import remote_access
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import stateful_updater
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
+
+from autotest_lib.utils.frozen_chromite.utils import key_value_store
+
+# Naming conventions for global variables:
+#   File on remote host without slash: REMOTE_XXX_FILENAME
+#   File on remote host with slash: REMOTE_XXX_FILE_PATH
+#   Path on remote host with slash: REMOTE_XXX_PATH
+#   File on local server without slash: LOCAL_XXX_FILENAME
+
+# Update Status for remote device.
+UPDATE_STATUS_IDLE = 'UPDATE_STATUS_IDLE'
+UPDATE_STATUS_DOWNLOADING = 'UPDATE_STATUS_DOWNLOADING'
+UPDATE_STATUS_FINALIZING = 'UPDATE_STATUS_FINALIZING'
+UPDATE_STATUS_UPDATED_NEED_REBOOT = 'UPDATE_STATUS_UPDATED_NEED_REBOOT'
+
+# Max number of the times for retry:
+# 1. for transfer functions to be retried.
+# 2. for some retriable commands to be retried.
+MAX_RETRY = 5
+
+# The delay between retriable tasks.
+DELAY_SEC_FOR_RETRY = 5
+
+# Number of seconds to wait for the post check version to settle.
+POST_CHECK_SETTLE_SECONDS = 15
+
+# Number of seconds to delay between post check retries.
+POST_CHECK_RETRY_SECONDS = 5
+
+
+class ChromiumOSUpdateError(Exception):
+  """Thrown when there is a general ChromiumOS-specific update error."""
+
+
+class PreSetupUpdateError(ChromiumOSUpdateError):
+  """Raised for the rootfs/stateful update pre-setup failures."""
+
+
+class RootfsUpdateError(ChromiumOSUpdateError):
+  """Raised for the Rootfs partition update failures."""
+
+
+class StatefulUpdateError(ChromiumOSUpdateError):
+  """Raised for the stateful partition update failures."""
+
+
+class AutoUpdateVerifyError(ChromiumOSUpdateError):
+  """Raised for verification failures after auto-update."""
+
+
+class RebootVerificationError(ChromiumOSUpdateError):
+  """Raised for failing to reboot errors."""
+
+
+class BaseUpdater(object):
+  """The base updater class."""
+
+  def __init__(self, device, payload_dir):
+    self.device = device
+    self.payload_dir = payload_dir
+
+
+class ChromiumOSUpdater(BaseUpdater):
+  """Used to update DUT with image."""
+
+  # Nebraska files.
+  LOCAL_NEBRASKA_LOG_FILENAME = 'nebraska.log'
+  REMOTE_NEBRASKA_FILENAME = 'nebraska.py'
+
+  # rootfs update files.
+  REMOTE_UPDATE_ENGINE_BIN_FILENAME = 'update_engine_client'
+  REMOTE_UPDATE_ENGINE_LOGFILE_PATH = '/var/log/update_engine.log'
+
+  UPDATE_CHECK_INTERVAL_PROGRESSBAR = 0.5
+  UPDATE_CHECK_INTERVAL_NORMAL = 10
+
+  # `mode` parameter when copying payload files to the DUT.
+  PAYLOAD_MODE_PARALLEL = 'parallel'
+  PAYLOAD_MODE_SCP = 'scp'
+
+  # Related to crbug.com/276094: Restore to 5 mins once the 'host did not
+  # return from reboot' bug is solved.
+  REBOOT_TIMEOUT = 480
+
+  REMOTE_STATEFUL_PATH_TO_CHECK = ('/var', '/home', '/mnt/stateful_partition')
+  REMOTE_STATEFUL_TEST_FILENAME = '.test_file_to_be_deleted'
+  REMOTE_UPDATED_MARKERFILE_PATH = '/run/update_engine_autoupdate_completed'
+  REMOTE_LAB_MACHINE_FILE_PATH = '/mnt/stateful_partition/.labmachine'
+  KERNEL_A = {'name': 'KERN-A', 'kernel': 2, 'root': 3}
+  KERNEL_B = {'name': 'KERN-B', 'kernel': 4, 'root': 5}
+  KERNEL_UPDATE_TIMEOUT = 180
+
+  def __init__(self, device, build_name, payload_dir, transfer_class,
+               log_file=None, tempdir=None, clobber_stateful=True,
+               yes=False, do_rootfs_update=True, do_stateful_update=True,
+               reboot=True, disable_verification=False,
+               send_payload_in_parallel=False, payload_filename=None,
+               staging_server=None, clear_tpm_owner=False):
+    """Initialize a ChromiumOSUpdater for auto-update a chromium OS device.
+
+    Args:
+      device: the ChromiumOSDevice to be updated.
+      build_name: the target update version for the device.
+      payload_dir: the directory of payload(s).
+      transfer_class: A reference to any subclass of
+          auto_updater_transfer.Transfer class.
+      log_file: The file to save running logs.
+      tempdir: the temp directory in caller, not in the device. For example,
+          the tempdir for cros flash is /tmp/cros-flash****/, used to
+          temporarily keep files when transferring update-utils package, and
+          reserve nebraska and update engine logs.
+      do_rootfs_update: whether to do rootfs partition update. The default is
+          True.
+      do_stateful_update: whether to do stateful partition update. The default
+          is True.
+      reboot: whether to reboot device after update. The default is True.
+      disable_verification: whether to disable rootfs verification on the
+          device. The default is False.
+      clobber_stateful: whether to do a clean stateful update. The default is
+          True.
+      yes: Assume "yes" (True) for any prompt. The default is False. However,
+          it should be set as True if we want to disable all the prompts for
+          auto-update.
+      payload_filename: Filename of exact payload file to use for
+          update instead of the default: update.gz. Defaults to None. Use
+          only if you staged a payload by filename (i.e not artifact) first.
+      send_payload_in_parallel: whether to transfer payload in chunks
+          in parallel. The default is False.
+      staging_server: URL (str) of the server that's staging the payload files.
+          Assuming transfer_class is None, if value for staging_server is None
+          or empty, an auto_updater_transfer.LocalTransfer reference must be
+          passed through the transfer_class parameter.
+      clear_tpm_owner: If true, it will clear the TPM owner on reboot.
+    """
+    super(ChromiumOSUpdater, self).__init__(device, payload_dir)
+
+    self.tempdir = (tempdir if tempdir is not None
+                    else tempfile.mkdtemp(prefix='cros-update'))
+    self.inactive_kernel = None
+    self.update_version = build_name
+
+    # Update setting
+    self._cmd_kwargs = {}
+    self._cmd_kwargs_omit_error = {'check': False}
+    self._do_stateful_update = do_stateful_update
+    self._do_rootfs_update = do_rootfs_update
+    self._disable_verification = disable_verification
+    self._clobber_stateful = clobber_stateful
+    self._reboot = reboot
+    self._yes = yes
+    # Device's directories
+    self.device_dev_dir = os.path.join(self.device.work_dir, 'src')
+    self.device_payload_dir = os.path.join(
+        self.device.work_dir,
+        auto_updater_transfer.Transfer.PAYLOAD_DIR_NAME)
+    # autoupdate_EndToEndTest uses exact payload filename for update
+    self.payload_filename = payload_filename
+    if send_payload_in_parallel:
+      self.payload_mode = self.PAYLOAD_MODE_PARALLEL
+    else:
+      self.payload_mode = self.PAYLOAD_MODE_SCP
+    self.perf_id = None
+
+    if log_file:
+      log_kwargs = {
+          'stdout': log_file,
+          'append_to_file': True,
+          'stderr': subprocess.STDOUT,
+      }
+      self._cmd_kwargs.update(log_kwargs)
+      self._cmd_kwargs_omit_error.update(log_kwargs)
+
+    self._staging_server = staging_server
+    self._transfer_obj = self._CreateTransferObject(transfer_class)
+
+    self._clear_tpm_owner = clear_tpm_owner
+
+  @property
+  def is_au_endtoendtest(self):
+    return self.payload_filename is not None
+
+  @property
+  def request_logs_dir(self):
+    """Returns path to the nebraska request logfiles directory.
+
+    Returns:
+      A complete path to the logfiles directory.
+    """
+    return self.tempdir
+
+  def _CreateTransferObject(self, transfer_class):
+    """Create the correct Transfer class.
+
+    Args:
+      transfer_class: A variable that contains a reference to one of the
+          Transfer classes in auto_updater_transfer.
+    """
+    assert issubclass(transfer_class, auto_updater_transfer.Transfer)
+
+    # Determine if staging_server needs to be passed as an argument to
+    # class_ref.
+    cls_kwargs = {}
+    if self._staging_server:
+      cls_kwargs['staging_server'] = self._staging_server
+
+    return transfer_class(
+        device=self.device, payload_dir=self.payload_dir,
+        payload_name=self._GetRootFsPayloadFileName(),
+        cmd_kwargs=self._cmd_kwargs,
+        transfer_rootfs_update=self._do_rootfs_update,
+        transfer_stateful_update=self._do_rootfs_update,
+        device_payload_dir=self.device_payload_dir, tempdir=self.tempdir,
+        payload_mode=self.payload_mode, **cls_kwargs)
+
+  def CheckRestoreStateful(self):
+    """Check whether to restore stateful."""
+    logging.debug('Checking whether to restore stateful...')
+    restore_stateful = False
+    try:
+      self._CheckNebraskaCanRun()
+      return restore_stateful
+    except nebraska_wrapper.NebraskaStartupError as e:
+      if self._do_rootfs_update:
+        msg = ('Cannot start nebraska! The stateful partition may be '
+               'corrupted: %s' % e)
+        prompt = 'Attempt to restore the stateful partition?'
+        restore_stateful = self._yes or cros_build_lib.BooleanPrompt(
+            prompt=prompt, default=False, prolog=msg)
+        if not restore_stateful:
+          raise ChromiumOSUpdateError(
+              'Cannot continue to perform rootfs update!')
+
+    logging.debug('Restoring the stateful partition is%s required.',
+                  ('' if restore_stateful else ' not'))
+    return restore_stateful
+
+  def _CheckNebraskaCanRun(self):
+    """We can run Nebraska on |device|."""
+    nebraska_bin = os.path.join(self.device_dev_dir,
+                                self.REMOTE_NEBRASKA_FILENAME)
+    nebraska = nebraska_wrapper.RemoteNebraskaWrapper(
+        self.device, nebraska_bin=nebraska_bin)
+    nebraska.CheckNebraskaCanRun()
+
+  @classmethod
+  def GetUpdateStatus(cls, device, keys=None):
+    """Returns the status of the update engine on the |device|.
+
+    Retrieves the status from update engine and confirms all keys are
+    in the status.
+
+    Args:
+      device: A ChromiumOSDevice object.
+      keys: the keys to look for in the status result (defaults to
+          ['CURRENT_OP']).
+
+    Returns:
+      A list of values in the order of |keys|.
+    """
+    keys = keys or ['CURRENT_OP']
+    result = device.run([cls.REMOTE_UPDATE_ENGINE_BIN_FILENAME, '--status'],
+                        capture_output=True, log_output=True)
+
+    if not result.output:
+      raise Exception('Cannot get update status')
+
+    try:
+      status = key_value_store.LoadData(result.output)
+    except ValueError:
+      raise ValueError('Cannot parse update status')
+
+    values = []
+    for key in keys:
+      if key not in status:
+        raise ValueError('Missing "%s" in the update engine status' % key)
+
+      values.append(status.get(key))
+
+    return values
+
+  @classmethod
+  def GetRootDev(cls, device):
+    """Get the current root device on |device|.
+
+    Args:
+      device: a ChromiumOSDevice object, defines whose root device we
+          want to fetch.
+    """
+    rootdev = device.run(
+        ['rootdev', '-s'], capture_output=True).output.strip()
+    logging.debug('Current root device is %s', rootdev)
+    return rootdev
+
+  def _StartUpdateEngineIfNotRunning(self, device):
+    """Starts update-engine service if it is not running.
+
+    Args:
+      device: a ChromiumOSDevice object, defines the target root device.
+    """
+    try:
+      result = device.run(['start', 'update-engine'],
+                          capture_output=True, log_output=True).stdout
+      if 'start/running' in result:
+        logging.info('update engine was not running, so we started it.')
+    except cros_build_lib.RunCommandError as e:
+      if e.result.returncode != 1 or 'is already running' not in e.result.error:
+        raise e
+
+  def SetupRootfsUpdate(self):
+    """Makes sure |device| is ready for rootfs update."""
+    logging.info('Checking if update engine is idle...')
+    self._StartUpdateEngineIfNotRunning(self.device)
+    status = self.GetUpdateStatus(self.device)[0]
+    if status == UPDATE_STATUS_UPDATED_NEED_REBOOT:
+      logging.info('Device needs to reboot before updating...')
+      self._Reboot('setup of Rootfs Update')
+      status = self.GetUpdateStatus(self.device)[0]
+
+    if status != UPDATE_STATUS_IDLE:
+      raise RootfsUpdateError('Update engine is not idle. Status: %s' % status)
+
+    if self.is_au_endtoendtest:
+      # TODO(ahassani): This should only be done for jetsteam devices.
+      self._RetryCommand(['sudo', 'stop', 'ap-update-manager'],
+                         **self._cmd_kwargs_omit_error)
+
+      self._RetryCommand(['rm', '-f', self.REMOTE_UPDATED_MARKERFILE_PATH],
+                         **self._cmd_kwargs)
+      self._RetryCommand(['stop', 'ui'], **self._cmd_kwargs_omit_error)
+
+
+  def _GetDevicePythonSysPath(self):
+    """Get python sys.path of the given |device|."""
+    sys_path = self.device.run(
+        ['python', '-c', '"import json, sys; json.dump(sys.path, sys.stdout)"'],
+        capture_output=True, log_output=True).output
+    return json.loads(sys_path)
+
+  def _FindDevicePythonPackagesDir(self):
+    """Find the python packages directory for the given |device|."""
+    third_party_host_dir = ''
+    sys_path = self._GetDevicePythonSysPath()
+    for p in sys_path:
+      if p.endswith('site-packages') or p.endswith('dist-packages'):
+        third_party_host_dir = p
+        break
+
+    if not third_party_host_dir:
+      raise ChromiumOSUpdateError(
+          'Cannot find proper site-packages/dist-packages directory from '
+          'sys.path for storing packages: %s' % sys_path)
+
+    return third_party_host_dir
+
+  def _GetRootFsPayloadFileName(self):
+    """Get the correct RootFs payload filename.
+
+    Returns:
+      The payload filename. (update.gz or a custom payload filename).
+    """
+    if self.is_au_endtoendtest:
+      return self.payload_filename
+    else:
+      return auto_updater_transfer.ROOTFS_FILENAME
+
+  def ResetStatefulPartition(self):
+    """Clear any pending stateful update request."""
+    logging.debug('Resetting stateful partition...')
+    try:
+      stateful_updater.StatefulUpdater(self.device).Reset()
+    except stateful_updater.Error as e:
+      raise StatefulUpdateError(e)
+
+  def RevertBootPartition(self):
+    """Revert the boot partition."""
+    part = self.GetRootDev(self.device)
+    logging.warning('Reverting update; Boot partition will be %s', part)
+    try:
+      self.device.run(['/postinst', part], **self._cmd_kwargs)
+    except cros_build_lib.RunCommandError as e:
+      logging.warning('Reverting the boot partition failed: %s', e)
+
+  def UpdateRootfs(self):
+    """Update the rootfs partition of the device (utilizing nebraska)."""
+    logging.notice('Updating rootfs partition...')
+    nebraska_bin = os.path.join(self.device_dev_dir,
+                                self.REMOTE_NEBRASKA_FILENAME)
+
+    nebraska = nebraska_wrapper.RemoteNebraskaWrapper(
+        self.device, nebraska_bin=nebraska_bin,
+        update_payloads_address='file://' + self.device_payload_dir,
+        update_metadata_dir=self.device_payload_dir)
+
+    try:
+      nebraska.Start()
+
+      # Use the localhost IP address (default) to ensure that update engine
+      # client can connect to the nebraska.
+      nebraska_url = nebraska.GetURL(critical_update=True)
+      cmd = [self.REMOTE_UPDATE_ENGINE_BIN_FILENAME, '--check_for_update',
+             '--omaha_url="%s"' % nebraska_url]
+
+      self.device.run(cmd, **self._cmd_kwargs)
+
+      # If we are using a progress bar, update it every 0.5s instead of 10s.
+      if command.UseProgressBar():
+        update_check_interval = self.UPDATE_CHECK_INTERVAL_PROGRESSBAR
+        oper = operation.ProgressBarOperation()
+      else:
+        update_check_interval = self.UPDATE_CHECK_INTERVAL_NORMAL
+        oper = None
+      end_message_not_printed = True
+
+      # Loop until update is complete.
+      while True:
+        # Number of times to retry `update_engine_client --status`. See
+        # crbug.com/744212.
+        update_engine_status_retry = 30
+        op, progress = retry_util.RetryException(
+            cros_build_lib.RunCommandError,
+            update_engine_status_retry,
+            self.GetUpdateStatus,
+            self.device,
+            ['CURRENT_OP', 'PROGRESS'],
+            delay_sec=DELAY_SEC_FOR_RETRY)[0:2]
+        logging.info('Waiting for update...status: %s at progress %s',
+                     op, progress)
+
+        if op == UPDATE_STATUS_UPDATED_NEED_REBOOT:
+          logging.info('Update completed.')
+          break
+
+        if op == UPDATE_STATUS_IDLE:
+          # Something went wrong. Try to get last error code.
+          cmd = ['cat', self.REMOTE_UPDATE_ENGINE_LOGFILE_PATH]
+          log = self.device.run(cmd).stdout.strip().splitlines()
+          err_str = 'Updating payload state for error code: '
+          targets = [line for line in log if err_str in line]
+          logging.debug('Error lines found: %s', targets)
+          if not targets:
+            raise RootfsUpdateError(
+                'Update failed with unexpected update status: %s' % op)
+          else:
+            # e.g. 20 (ErrorCode::kDownloadStateInitializationError)
+            raise RootfsUpdateError(targets[-1].rpartition(err_str)[2])
+
+        if oper is not None:
+          if op == UPDATE_STATUS_DOWNLOADING:
+            oper.ProgressBar(float(progress))
+          elif end_message_not_printed and op == UPDATE_STATUS_FINALIZING:
+            oper.Cleanup()
+            logging.info('Finalizing image.')
+            end_message_not_printed = False
+
+        time.sleep(update_check_interval)
+    # TODO(ahassani): Scope the Exception to finer levels. For example we don't
+    # need to revert the boot partition if the Nebraska fails to start, etc.
+    except Exception as e:
+      logging.error('Rootfs update failed %s', e)
+      self.RevertBootPartition()
+      logging.warning(nebraska.PrintLog() or 'No nebraska log is available.')
+      raise RootfsUpdateError('Failed to perform rootfs update: %r' % e)
+    finally:
+      nebraska.Stop()
+
+      nebraska.CollectLogs(os.path.join(self.tempdir,
+                                        self.LOCAL_NEBRASKA_LOG_FILENAME))
+      self.device.CopyFromDevice(
+          self.REMOTE_UPDATE_ENGINE_LOGFILE_PATH,
+          os.path.join(self.tempdir, os.path.basename(
+              self.REMOTE_UPDATE_ENGINE_LOGFILE_PATH)),
+          follow_symlinks=True, **self._cmd_kwargs_omit_error)
+
+  def UpdateStateful(self):
+    """Update the stateful partition of the device."""
+    try:
+      stateful_update_payload = os.path.join(
+          self.device.work_dir, auto_updater_transfer.STATEFUL_FILENAME)
+
+      updater = stateful_updater.StatefulUpdater(self.device)
+      updater.Update(
+          stateful_update_payload,
+          update_type=(stateful_updater.StatefulUpdater.UPDATE_TYPE_CLOBBER if
+                       self._clobber_stateful else None))
+
+      # Delete the stateful update file on success so it doesn't occupy extra
+      # disk space. On failure it will get cleaned up.
+      self.device.DeletePath(stateful_update_payload)
+    except stateful_updater.Error as e:
+      error_msg = 'Stateful update failed with error: %s' % str(e)
+      logging.exception(error_msg)
+      self.ResetStatefulPartition()
+      raise StatefulUpdateError(error_msg)
+
+  def _FixPayloadPropertiesFile(self):
+    """Fix the update payload properties file so nebraska can use it.
+
+    Update the payload properties file to make sure that nebraska can use it.
+    The reason is that very old payloads are still being used for provisioning
+    the AU tests, but those properties files are not compatible with recent
+    nebraska protocols.
+
+    TODO(ahassani): Once we only test delta or full payload with
+    source image of M77 or higher, this function can be deprecated.
+    """
+    logging.info('Fixing payload properties file.')
+    payload_properties_path = self._transfer_obj.GetPayloadPropsFile()
+    props = json.loads(osutils.ReadFile(payload_properties_path))
+    props['appid'] = self.ResolveAPPIDMismatchIfAny(props.get('appid'))
+    values = self._transfer_obj.GetPayloadProps()
+
+    # TODO(ahassani): Use the keys from nebraska.py once it is moved to
+    # chromite.
+    valid_entries = {
+        # Only old payloads lack this, and since they are only used for
+        # provisioning, they will be full payloads.
+        'is_delta': False,
+        'size': values['size'],
+        'target_version': values['image_version'],
+    }
+
+    for key, value in valid_entries.items():
+      if props.get(key) is None:
+        props[key] = value
+
+    with open(payload_properties_path, 'w') as fp:
+      json.dump(props, fp)
+
+  def RunUpdateRootfs(self):
+    """Run all processes needed by updating rootfs.
+
+    1. Check device's status to make sure it can be updated.
+    2. Copy files to remote device needed for rootfs update.
+    3. Do root updating.
+    """
+
+    # Any call to self._transfer_obj.TransferRootfsUpdate() must be preceded by
+    # a conditional call to self._FixPayloadPropertiesFile() as this handles the
+    # use case reported in crbug.com/1012520. Whenever
+    # self._FixPayloadPropertiesFile() gets deprecated, this call can be safely
+    # removed. For more details on TODOs, refer to self.TransferRootfsUpdate()
+    # docstrings.
+
+    self._FixPayloadPropertiesFile()
+
+    # SetupRootfsUpdate() may reboot the device and therefore should be called
+    # before any payloads are transferred to the device and only if rootfs
+    # update is required.
+    self.SetupRootfsUpdate()
+
+    # Copy payload for rootfs update.
+    self._transfer_obj.TransferRootfsUpdate()
+
+    self.UpdateRootfs()
+
+    if self.is_au_endtoendtest:
+      self.PostCheckRootfsUpdate()
+
+    # Delete the update file so it doesn't take much space on disk for the
+    # remainder of the update process.
+    self.device.DeletePath(self.device_payload_dir, recursive=True)
+
+  def RunUpdateStateful(self):
+    """Run all processes needed by updating stateful.
+
+    1. Copy files to remote device needed by stateful update.
+    2. Do stateful update.
+    """
+    self._transfer_obj.TransferStatefulUpdate()
+    self.UpdateStateful()
+
+  def RebootAndVerify(self):
+    """Reboot and verify the remote device.
+
+    1. Reboot the remote device. If _clobber_stateful (--clobber-stateful)
+    is executed, the stateful partition is wiped, and the working directory
+    on the remote device no longer exists. So, recreate the working directory
+    for this remote device.
+    2. Verify the remote device, by checking whether the root device
+    changed after reboot.
+    """
+    logging.notice('Rebooting device...')
+    # Record the current root device. This must be done after SetupRootfsUpdate
+    # and before reboot, since SetupRootfsUpdate may reboot the device if there
+    # is a pending update, which changes the root device, and reboot will
+    # definitely change the root device if update successfully finishes.
+    old_root_dev = self.GetRootDev(self.device)
+    self.device.Reboot()
+    if self._clobber_stateful:
+      self.device.run(['mkdir', '-p', self.device.work_dir])
+
+    if self._do_rootfs_update:
+      logging.notice('Verifying that the device has been updated...')
+      new_root_dev = self.GetRootDev(self.device)
+      if old_root_dev is None:
+        raise AutoUpdateVerifyError(
+            'Failed to locate root device before update.')
+
+      if new_root_dev is None:
+        raise AutoUpdateVerifyError(
+            'Failed to locate root device after update.')
+
+      if new_root_dev == old_root_dev:
+        raise AutoUpdateVerifyError(
+            'Failed to boot into the new version. Possibly there was a '
+            'signing problem, or an automated rollback occurred because '
+            'your new image failed to boot.')
+
+  def ResolveAPPIDMismatchIfAny(self, payload_app_id):
+    """Resolves and APP ID mismatch between the payload and device.
+
+    If the APP ID of the payload is different than the device, then the nebraska
+    will fail. We empty the payload's AppID so nebraska can do partial APP ID
+    matching.
+    """
+    if ((self.device.app_id and self.device.app_id == payload_app_id) or
+        payload_app_id == ''):
+      return payload_app_id
+
+    logging.warning('You are installing an image with a different release '
+                    'App ID than the device (%s vs %s), we are forcing the '
+                    'install!', payload_app_id, self.device.app_id)
+    return ''
+
+  def RunUpdate(self):
+    """Update the device with image of specific version."""
+    self._transfer_obj.CheckPayloads()
+
+    self._transfer_obj.TransferUpdateUtilsPackage()
+
+    restore_stateful = self.CheckRestoreStateful()
+    if restore_stateful:
+      self.RestoreStateful()
+
+    # Perform device updates.
+    if self._do_rootfs_update:
+      self.RunUpdateRootfs()
+      logging.info('Rootfs update completed.')
+
+    if self._do_stateful_update and not restore_stateful:
+      self.RunUpdateStateful()
+      logging.info('Stateful update completed.')
+
+    if self._clear_tpm_owner:
+      self.SetClearTpmOwnerRequest()
+
+    if self._reboot:
+      self.RebootAndVerify()
+
+    if self.is_au_endtoendtest:
+      self.PostCheckCrOSUpdate()
+
+    if self._disable_verification:
+      logging.info('Disabling rootfs verification on the device...')
+      self.device.DisableRootfsVerification()
+
+  def _Reboot(self, error_stage, timeout=None):
+    try:
+      if timeout is None:
+        timeout = self.REBOOT_TIMEOUT
+      self.device.Reboot(timeout_sec=timeout)
+    except cros_build_lib.DieSystemExit:
+      raise ChromiumOSUpdateError('Could not recover from reboot at %s' %
+                                  error_stage)
+    except remote_access.SSHConnectionError:
+      raise ChromiumOSUpdateError('Failed to connect at %s' % error_stage)
+
+  def _cgpt(self, flag, kernel, dev='$(rootdev -s -d)'):
+    """Return numeric cgpt value for the specified flag, kernel, device."""
+    cmd = ['cgpt', 'show', '-n', '-i', '%d' % kernel['kernel'], flag, dev]
+    return int(self._RetryCommand(
+        cmd, capture_output=True, log_output=True).output.strip())
+
+  def _GetKernelPriority(self, kernel):
+    """Return numeric priority for the specified kernel.
+
+    Args:
+      kernel: information of the given kernel, KERNEL_A or KERNEL_B.
+    """
+    return self._cgpt('-P', kernel)
+
+  def _GetKernelSuccess(self, kernel):
+    """Return boolean success flag for the specified kernel.
+
+    Args:
+      kernel: information of the given kernel, KERNEL_A or KERNEL_B.
+    """
+    return self._cgpt('-S', kernel) != 0
+
+  def _GetKernelTries(self, kernel):
+    """Return tries count for the specified kernel.
+
+    Args:
+      kernel: information of the given kernel, KERNEL_A or KERNEL_B.
+    """
+    return self._cgpt('-T', kernel)
+
+  def _GetKernelState(self):
+    """Returns the (<active>, <inactive>) kernel state as a pair."""
+    active_root = int(re.findall(r'(\d+\Z)', self.GetRootDev(self.device))[0])
+    if active_root == self.KERNEL_A['root']:
+      return self.KERNEL_A, self.KERNEL_B
+    elif active_root == self.KERNEL_B['root']:
+      return self.KERNEL_B, self.KERNEL_A
+    else:
+      raise ChromiumOSUpdateError('Encountered unknown root partition: %s' %
+                                  active_root)
+
+  def _GetReleaseVersion(self):
+    """Get release version of the device."""
+    lsb_release_content = self._RetryCommand(
+        ['cat', '/etc/lsb-release'],
+        capture_output=True, log_output=True).output.strip()
+    regex = r'^CHROMEOS_RELEASE_VERSION=(.+)$'
+    return auto_update_util.GetChromeosBuildInfo(
+        lsb_release_content=lsb_release_content, regex=regex)
+
+  def _GetReleaseBuilderPath(self):
+    """Get release version of the device."""
+    lsb_release_content = self._RetryCommand(
+        ['cat', '/etc/lsb-release'],
+        capture_output=True, log_output=True).output.strip()
+    regex = r'^CHROMEOS_RELEASE_BUILDER_PATH=(.+)$'
+    return auto_update_util.GetChromeosBuildInfo(
+        lsb_release_content=lsb_release_content, regex=regex)
+
+  def CheckVersion(self):
+    """Check the image running in DUT has the expected version.
+
+    Returns:
+      True if the DUT's image version matches the version that the
+      ChromiumOSUpdater tries to update to.
+    """
+    if not self.update_version:
+      return False
+
+    # Use CHROMEOS_RELEASE_BUILDER_PATH to match the build version if it exists
+    # in lsb-release, otherwise, continue using CHROMEOS_RELEASE_VERSION.
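+    # For example (illustrative), an update_version of
+    # 'peppy-release/R43-6809.0.0' matches an identical builder path, while
+    # 'R43-6809.0.0' matches a release version of '6809.0.0' via endswith().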
+    release_builder_path = self._GetReleaseBuilderPath()
+    if release_builder_path:
+      return self.update_version == release_builder_path
+
+    return self.update_version.endswith(self._GetReleaseVersion())
+
+  def _VerifyBootExpectations(self, expected_kernel_state, rollback_message):
+    """Verify that we fully booted given expected kernel state.
+
+    It verifies that we booted using the correct kernel state, and that the
+    OS has marked the kernel as good.
+
+    Args:
+      expected_kernel_state: the kernel state we expect to have booted into,
+        e.g. partition 4. See output of _GetKernelState.
+      rollback_message: string to raise as a RootfsUpdateError if we booted
+        with the wrong partition.
+    """
+    logging.debug('Start verifying boot expectations...')
+    # Figure out the newly active kernel
+    active_kernel_state = self._GetKernelState()[0]
+
+    # Rollback
+    if (expected_kernel_state and
+        active_kernel_state != expected_kernel_state):
+      logging.debug('Dumping partition table.')
+      self.device.run(['cgpt', 'show', '$(rootdev -s -d)'],
+                      **self._cmd_kwargs)
+      logging.debug('Dumping crossystem for firmware debugging.')
+      self.device.run(['crossystem', '--all'], **self._cmd_kwargs)
+      raise RootfsUpdateError(rollback_message)
+
+    # Make sure chromeos-setgoodkernel runs
+    try:
+      timeout_util.WaitForReturnTrue(
+          lambda: (self._GetKernelTries(active_kernel_state) == 0
+                   and self._GetKernelSuccess(active_kernel_state)),
+          self.KERNEL_UPDATE_TIMEOUT,
+          period=5)
+    except timeout_util.TimeoutError:
+      services_status = self.device.run(
+          ['status', 'system-services'], capture_output=True,
+          log_output=True).output
+      logging.debug('System services_status: %r', services_status)
+      if services_status != 'system-services start/running\n':
+        event = ('Chrome failed to reach login screen')
+      else:
+        event = ('update-engine failed to call '
+                 'chromeos-setgoodkernel')
+      raise RootfsUpdateError(
+          'After update and reboot, %s '
+          'within %d seconds' % (event, self.KERNEL_UPDATE_TIMEOUT))
+
+  def _CheckVersionToConfirmInstall(self):
+    logging.debug('Checking whether the new build is successfully installed...')
+    if not self.update_version:
+      logging.debug('No update_version is provided when the test is executed '
+                    'with local nebraska.')
+      return True
+
+    # Always try the default check_version method first, this prevents
+    # any backward compatibility issue.
+    if self.CheckVersion():
+      return True
+
+    return auto_update_util.VersionMatch(
+        self.update_version, self._GetReleaseVersion())
+
+  def _RetryCommand(self, cmd, **kwargs):
+    """Retry commands if SSHConnectionError happens.
+
+    Args:
+      cmd: the command to be run by device.
+      kwargs: the parameters for device to run the command.
+
+    Returns:
+      the output of running the command.
+    """
+    return retry_util.RetryException(
+        remote_access.SSHConnectionError,
+        MAX_RETRY,
+        self.device.run,
+        cmd, delay_sec=DELAY_SEC_FOR_RETRY,
+        shell=isinstance(cmd, six.string_types),
+        **kwargs)
+
+  def PreSetupStatefulUpdate(self):
+    """Pre-setup for stateful update for CrOS host."""
+    logging.debug('Start pre-setup for stateful update...')
+    if self._clobber_stateful:
+      for folder in self.REMOTE_STATEFUL_PATH_TO_CHECK:
+        touch_path = os.path.join(folder, self.REMOTE_STATEFUL_TEST_FILENAME)
+        self._RetryCommand(['touch', touch_path], **self._cmd_kwargs)
+
+  def PostCheckStatefulUpdate(self):
+    """Post-check for stateful update for CrOS host."""
+    logging.debug('Start post check for stateful update...')
+    self._Reboot('post check of stateful update')
+    if self._clobber_stateful:
+      for folder in self.REMOTE_STATEFUL_PATH_TO_CHECK:
+        test_file_path = os.path.join(folder,
+                                      self.REMOTE_STATEFUL_TEST_FILENAME)
+        # If stateful update succeeds, these test files should not exist.
+        if self.device.IfFileExists(test_file_path,
+                                    **self._cmd_kwargs_omit_error):
+          raise StatefulUpdateError('failed to post-check stateful update.')
+
+  def _IsUpdateUtilsPackageInstalled(self):
+    """Check whether update-utils package is well installed.
+
+    There's a chance that nebraska package is removed in the middle of
+    auto-update process. This function double check it and transfer it if it's
+    removed.
+    """
+    logging.info('Checking whether nebraska files are still on the device...')
+    try:
+      nebraska_bin = os.path.join(self.device_dev_dir,
+                                  self.REMOTE_NEBRASKA_FILENAME)
+      if not self.device.IfFileExists(
+          nebraska_bin, **self._cmd_kwargs_omit_error):
+        logging.info('Nebraska files not found on device. Resending them...')
+
+        self._transfer_obj.TransferUpdateUtilsPackage()
+
+      return True
+    except cros_build_lib.RunCommandError as e:
+      logging.warning('Failed to verify whether packages still exist: %s', e)
+      return False
+
+  def CheckNebraskaCanRun(self):
+    """Check if nebraska can successfully run for ChromiumOSUpdater."""
+    self._IsUpdateUtilsPackageInstalled()
+    self._CheckNebraskaCanRun()
+
+  def RestoreStateful(self):
+    """Restore stateful partition for device."""
+    logging.warning('Restoring the stateful partition.')
+    self.PreSetupStatefulUpdate()
+    self._transfer_obj.TransferStatefulUpdate()
+    self.ResetStatefulPartition()
+    self.UpdateStateful()
+    self.PostCheckStatefulUpdate()
+    try:
+      self.CheckNebraskaCanRun()
+      logging.info('Stateful partition restored.')
+    except nebraska_wrapper.NebraskaStartupError as e:
+      raise ChromiumOSUpdateError(
+          'Unable to restore stateful partition: %s' % e)
+
+  def SetClearTpmOwnerRequest(self):
+    """Set clear_tpm_owner_request flag."""
+    # The issue is that certain AU tests leave the TPM in a bad state which
+    # most commonly shows up in provisioning.  Executing this 'crossystem'
+    # command before rebooting clears the problem state during the reboot.
+    # It's also worth mentioning that this isn't a complete fix:  The bad
+    # TPM state in theory might happen some time other than during
+    # provisioning.  Also, the bad TPM state isn't supposed to happen at
+    # all; this change is just papering over the real bug.
+    logging.info('Setting clear_tpm_owner_request to 1.')
+    self._RetryCommand('crossystem clear_tpm_owner_request=1',
+                       **self._cmd_kwargs_omit_error)
+
+  def PostCheckRootfsUpdate(self):
+    """Post-check for rootfs update for CrOS host."""
+    logging.debug('Start post check for rootfs update...')
+    active_kernel, inactive_kernel = self._GetKernelState()
+    logging.debug('active_kernel= %s, inactive_kernel=%s',
+                  active_kernel, inactive_kernel)
+    if (self._GetKernelPriority(inactive_kernel) <
+        self._GetKernelPriority(active_kernel)):
+      raise RootfsUpdateError('Update failed. The priority of the inactive '
+                              'kernel partition is less than that of the '
+                              'active kernel partition.')
+    self.inactive_kernel = inactive_kernel
+
+  def PostCheckCrOSUpdate(self):
+    """Post check for the whole auto-update process."""
+    logging.debug('Post check for the whole CrOS update...')
+    start_time = time.time()
+    # Don't use 'sh' here since the current device.run cannot recognize
+    # the content of $FILE.
+    autoreboot_cmd = ('FILE="%s" ; [ -f "$FILE" ] || '
+                      '( touch "$FILE" ; start autoreboot )')
+    self._RetryCommand(autoreboot_cmd % self.REMOTE_LAB_MACHINE_FILE_PATH,
+                       **self._cmd_kwargs)
+
+    # Loop in case the initial check happens before the reboot.
+    while True:
+      try:
+        start_verify_time = time.time()
+        self._VerifyBootExpectations(
+            self.inactive_kernel, rollback_message=
+            'Build %s failed to boot on %s; system rolled back to previous '
+            'build' % (self.update_version, self.device.hostname))
+
+        # Check that we've got the build we meant to install.
+        if not self._CheckVersionToConfirmInstall():
+          raise ChromiumOSUpdateError(
+              'Failed to update %s to build %s; found build '
+              '%s instead' % (self.device.hostname,
+                              self.update_version,
+                              self._GetReleaseVersion()))
+      except RebootVerificationError as e:
+        # If a minimum amount of time has not elapsed since starting the
+        # check, wait and retry.  Use the start of the verification
+        # time in case an SSH call takes a long time to return/fail.
+        if start_verify_time - start_time < POST_CHECK_SETTLE_SECONDS:
+          logging.warning('Delaying for re-check of %s to update to %s (%s)',
+                          self.device.hostname, self.update_version, e)
+          time.sleep(POST_CHECK_RETRY_SECONDS)
+          continue
+        raise
+      break
+
+    if not self._clobber_stateful:
+      self.PostRebootUpdateCheckForAUTest()
+
+  def PostRebootUpdateCheckForAUTest(self):
+    """Do another update check after reboot to get the post update hostlog.
+
+    This is only done with autoupdate_EndToEndTest.
+    """
+    logging.debug('Doing one final update check to get post update hostlog.')
+    nebraska_bin = os.path.join(self.device_dev_dir,
+                                self.REMOTE_NEBRASKA_FILENAME)
+    nebraska = nebraska_wrapper.RemoteNebraskaWrapper(
+        self.device, nebraska_bin=nebraska_bin,
+        update_metadata_dir=self.device.work_dir)
+
+    try:
+      nebraska.Start()
+
+      nebraska_url = nebraska.GetURL(critical_update=True, no_update=True)
+      cmd = [self.REMOTE_UPDATE_ENGINE_BIN_FILENAME, '--check_for_update',
+             '--omaha_url="%s"' % nebraska_url]
+      self.device.run(cmd, **self._cmd_kwargs)
+      op = self.GetUpdateStatus(self.device)[0]
+      logging.info('Post update check status: %s', op)
+    except Exception as err:
+      logging.error('Post reboot update check failed: %s', str(err))
+      logging.warning(nebraska.PrintLog() or 'No nebraska log is available.')
+    finally:
+      nebraska.Stop()
diff --git a/utils/frozen_chromite/lib/auto_updater_transfer.py b/utils/frozen_chromite/lib/auto_updater_transfer.py
new file mode 100644
index 0000000..9805bcd
--- /dev/null
+++ b/utils/frozen_chromite/lib/auto_updater_transfer.py
@@ -0,0 +1,619 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Library containing functions to transfer files onto a remote device.
+
+Transfer Base class includes:
+
+  ----Transfer----
+  * @retry functionality for all public transfer functions.
+
+LocalTransfer includes:
+
+  ----Precheck---
+  * Pre-check payload's existence before auto-update.
+
+  ----Transfer----
+  * Transfer the update-utils (nebraska, et al.) package first.
+  * Transfer rootfs update files if rootfs update is required.
+  * Transfer stateful update files if stateful update is required.
+
+LabTransfer includes:
+
+  ----Precheck---
+  * Pre-check payload's existence on the staging server before auto-update.
+
+  ----Transfer----
+  * Download the update-utils (nebraska, et al.) package onto the DUT directly
+    from the staging server first.
+  * Download rootfs update files onto the DUT directly from the staging server
+    if rootfs update is required.
+  * Download stateful update files onto the DUT directly from the staging server
+    if stateful update is required.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import abc
+import json
+import os
+import re
+
+import six
+from six.moves import urllib
+
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import nebraska_wrapper
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+
+# Naming conventions for global variables:
+#   Path on remote host with slash: REMOTE_XXX_PATH
+#   File on local server without slash: LOCAL_XXX_FILENAME
+#   Path on local server: LOCAL_XXX_PATH
+
+# Max number of the times for retry:
+# 1. for transfer functions to be retried.
+# 2. for some retriable commands to be retried.
+_MAX_RETRY = 5
+
+# The delay between retriable tasks.
+_DELAY_SEC_FOR_RETRY = 5
+
+# Update file names for rootfs+kernel and stateful partitions.
+ROOTFS_FILENAME = 'update.gz'
+STATEFUL_FILENAME = 'stateful.tgz'
+
+# Regular expression that is used to evaluate payload names to determine payload
+# validity.
+_PAYLOAD_PATTERN = r'payloads/chromeos_(?P<image_version>[^_]+)_.*'
+
+# File copying modes.
+_SCP = 'scp'
+
+
+class Error(Exception):
+  """A generic auto updater transfer error."""
+
+
+class ChromiumOSTransferError(Error):
+  """Thrown when there is a general transfer specific error."""
+
+
+def GetPayloadPropertiesFileName(payload):
+  """Returns the payload properties file given the path to the payload."""
+  return payload + '.json'
+
+
+class Transfer(six.with_metaclass(abc.ABCMeta, object)):
+  """Abstract Base Class that handles payload precheck and transfer."""
+
+  PAYLOAD_DIR_NAME = 'payloads'
+
+  def __init__(self, device, payload_dir, tempdir,
+               payload_name, cmd_kwargs, device_payload_dir,
+               payload_mode='scp', transfer_stateful_update=True,
+               transfer_rootfs_update=True):
+    """Initialize Base Class for transferring payloads functionality.
+
+    Args:
+      device: The ChromiumOSDevice to be updated.
+      payload_dir: The directory of payload(s).
+      tempdir: The temp directory in caller, not in the device. For example,
+          the tempdir for cros flash is /tmp/cros-flash****/, used to
+          temporarily keep files when transferring update-utils package, and
+          reserve nebraska and update engine logs.
+      payload_name: Filename of exact payload file to use for update.
+      cmd_kwargs: Keyword arguments that are sent along with the commands that
+          are run on the device.
+      device_payload_dir: Path to the payload directory in the device's work
+          directory.
+      payload_mode: The payload mode - it can be 'parallel' or 'scp'.
+      transfer_stateful_update: Whether to transfer payloads necessary for
+          stateful update. The default is True.
+      transfer_rootfs_update: Whether to transfer payloads necessary for
+          rootfs update. The default is True.
+    """
+    self._device = device
+    self._payload_dir = payload_dir
+    self._tempdir = tempdir
+    self._payload_name = payload_name
+    self._cmd_kwargs = cmd_kwargs
+    self._device_payload_dir = device_payload_dir
+    if payload_mode not in ('scp', 'parallel'):
+      raise ValueError('The given value %s for payload mode is not valid.' %
+                       payload_mode)
+    self._payload_mode = payload_mode
+    self._transfer_stateful_update = transfer_stateful_update
+    self._transfer_rootfs_update = transfer_rootfs_update
+    self._local_payload_props_path = None
+
+  @abc.abstractmethod
+  def CheckPayloads(self):
+    """Verify that all required payloads are in |self.payload_dir|."""
+
+  def TransferUpdateUtilsPackage(self):
+    """Transfer update-utils package to work directory of the remote device."""
+    retry_util.RetryException(
+        cros_build_lib.RunCommandError,
+        _MAX_RETRY,
+        self._TransferUpdateUtilsPackage,
+        delay_sec=_DELAY_SEC_FOR_RETRY)
+
+  def TransferRootfsUpdate(self):
+    """Transfer files for rootfs update.
+
+    The corresponding payloads are copied to the remote device for rootfs
+    update.
+    """
+    retry_util.RetryException(
+        cros_build_lib.RunCommandError,
+        _MAX_RETRY,
+        self._TransferRootfsUpdate,
+        delay_sec=_DELAY_SEC_FOR_RETRY)
+
+  def TransferStatefulUpdate(self):
+    """Transfer files for stateful update.
+
+    The stateful update bin and the corresponding payloads are copied to the
+    target remote device for stateful update.
+    """
+    retry_util.RetryException(
+        cros_build_lib.RunCommandError,
+        _MAX_RETRY,
+        self._TransferStatefulUpdate,
+        delay_sec=_DELAY_SEC_FOR_RETRY)
+
+  def _EnsureDeviceDirectory(self, directory):
+    """Mkdir the directory no matther whether this directory exists on host.
+
+    Args:
+      directory: The directory to be made on the device.
+    """
+    self._device.run(['mkdir', '-p', directory], **self._cmd_kwargs)
+
+  @abc.abstractmethod
+  def GetPayloadPropsFile(self):
+    """Get the payload properties file path."""
+
+  @abc.abstractmethod
+  def GetPayloadProps(self):
+    """Gets properties necessary to fix the payload properties file.
+
+    Returns:
+      Dict in the format: {'image_version': 12345.0.0, 'size': 123456789}.
+    """
+
+  def _GetPayloadFormat(self):
+    """Gets the payload format that should be evaluated.
+
+    Returns:
+      The payload name as a string.
+    """
+    return self._payload_name
+
+  def _GetPayloadPattern(self):
+    """The regex pattern that the payload format must match.
+
+    Returns:
+      Regular expression.
+    """
+    return _PAYLOAD_PATTERN
+
+
+class LocalTransfer(Transfer):
+  """Abstracts logic that handles transferring local files to the DUT."""
+
+  def __init__(self, *args, **kwargs):
+    """Initialize LocalTransfer to handle transferring files from local to DUT.
+
+    Args:
+      *args: The list of arguments to be passed. See Base class for a complete
+          list of accepted arguments.
+      **kwargs: Any keyword arguments to be passed. See Base class for a
+          complete list of accepted keyword arguments.
+    """
+    super(LocalTransfer, self).__init__(*args, **kwargs)
+
+  def CheckPayloads(self):
+    """Verify that all required payloads are in |self.payload_dir|."""
+    logging.debug('Checking if payloads have been stored in directory %s...',
+                  self._payload_dir)
+    filenames = []
+
+    if self._transfer_rootfs_update:
+      filenames += [self._payload_name,
+                    GetPayloadPropertiesFileName(self._payload_name)]
+
+    if self._transfer_stateful_update:
+      filenames += [STATEFUL_FILENAME]
+
+    for fname in filenames:
+      payload = os.path.join(self._payload_dir, fname)
+      if not os.path.exists(payload):
+        raise ChromiumOSTransferError('Payload %s does not exist!' % payload)
+
+  def _TransferUpdateUtilsPackage(self):
+    """Transfer update-utils package to work directory of the remote device."""
+    logging.notice('Copying update script to device...')
+    source_dir = os.path.join(self._tempdir, 'src')
+    osutils.SafeMakedirs(source_dir)
+    nebraska_wrapper.RemoteNebraskaWrapper.GetNebraskaSrcFile(source_dir)
+
+    # Make sure the device.work_dir exists after any installation and reboot.
+    self._EnsureDeviceDirectory(self._device.work_dir)
+    # Python packages are plain text files.
+    self._device.CopyToWorkDir(source_dir, mode=_SCP, log_output=True,
+                               **self._cmd_kwargs)
+
+  def _TransferRootfsUpdate(self):
+    """Transfer files for rootfs update.
+
+    Copy the update payload to the remote device for rootfs update.
+    """
+    self._EnsureDeviceDirectory(self._device_payload_dir)
+    logging.notice('Copying rootfs payload to device...')
+    payload = os.path.join(self._payload_dir, self._payload_name)
+    self._device.CopyToWorkDir(payload, self.PAYLOAD_DIR_NAME,
+                               mode=self._payload_mode,
+                               log_output=True, **self._cmd_kwargs)
+    payload_properties_path = GetPayloadPropertiesFileName(payload)
+    self._device.CopyToWorkDir(payload_properties_path, self.PAYLOAD_DIR_NAME,
+                               mode=self._payload_mode,
+                               log_output=True, **self._cmd_kwargs)
+
+  def _TransferStatefulUpdate(self):
+    """Transfer files for stateful update.
+
+    The stateful update payloads are copied to the target remote device for
+    stateful update.
+    """
+    logging.notice('Copying target stateful payload to device...')
+    payload = os.path.join(self._payload_dir, STATEFUL_FILENAME)
+    self._device.CopyToWorkDir(payload, mode=self._payload_mode,
+                               log_output=True, **self._cmd_kwargs)
+
+  def GetPayloadPropsFile(self):
+    """Finds the local payload properties file."""
+    # The payload properties file is available locally, so just use the one
+    # next to the payload file.
+    if self._local_payload_props_path is None:
+      self._local_payload_props_path = os.path.join(
+          self._payload_dir, GetPayloadPropertiesFileName(self._payload_name))
+    return self._local_payload_props_path
+
+  def GetPayloadProps(self):
+    """Gets image_version from the payload_name and size of the payload.
+
+    The payload name must match the 'payloads/chromeos_<version>_*' pattern for
+    a complete match; else a ValueError will be raised. In case the payload
+    filename is update.gz, then image_version cannot be extracted from its
+    name; therefore, image_version is set to a dummy 99999.0.0.
+
+    Returns:
+      Dict - See parent class's function for full details.
+    """
+    payload_filepath = os.path.join(self._payload_dir, self._payload_name)
+    values = {
+        'image_version': '99999.0.0',
+        'size': os.path.getsize(payload_filepath)
+    }
+    if self._payload_name != ROOTFS_FILENAME:
+      payload_format = self._GetPayloadFormat()
+      payload_pattern = self._GetPayloadPattern()
+      m = re.match(payload_pattern, payload_format)
+      if not m:
+        raise ValueError(
+            'Regular expression %r did not match the expected payload format '
+            '%s' % (payload_pattern, payload_format))
+      values.update(m.groupdict())
+    return values
+
+
+class LabTransfer(Transfer):
+  """Abstracts logic that transfers files from staging server to the DUT."""
+
+  def __init__(self, staging_server, *args, **kwargs):
+    """Initialize LabTransfer to transfer files from staging server to DUT.
+
+    Args:
+      staging_server: Url of the server that's staging the payload files.
+      *args: The list of arguments to be passed. See Base class for a complete
+          list of accepted arguments.
+      **kwargs: Any keyword arguments to be passed. See Base class for a
+          complete list of accepted keyword arguments.
+    """
+    self._staging_server = staging_server
+    super(LabTransfer, self).__init__(*args, **kwargs)
+
+  def _GetPayloadFormat(self):
+    """Gets the payload format that should be evaluated.
+
+    Returns:
+      The payload dir as a string.
+    """
+    return self._payload_dir
+
+  def _GetPayloadPattern(self):
+    """The regex pattern that the payload format must match.
+
+    Returns:
+      Regular expression.
+    """
+    return r'.*/(R[0-9]+-)(?P<image_version>.+)'
+
+  def _RemoteDevserverCall(self, cmd, stdout=False):
+    """Runs a command on a remote devserver by sshing into it.
+
+    Raises cros_build_lib.RunCommandError() if the command could not be run
+    successfully.
+
+    Args:
+      cmd: (list) the command to be run.
+      stdout: True if the stdout of the command should be captured.
+    """
+    ip = urllib.parse.urlparse(self._staging_server).hostname
+    return cros_build_lib.run(['ssh', ip] + cmd, log_output=True, stdout=stdout)
+
+  def _CheckPayloads(self, payload_name):
+    """Runs the curl command that checks if payloads have been staged."""
+    payload_url = self._GetStagedUrl(staged_filename=payload_name,
+                                     build_id=self._payload_dir)
+    cmd = ['curl', '-I', payload_url, '--fail']
+    try:
+      self._RemoteDevserverCall(cmd)
+    except cros_build_lib.RunCommandError as e:
+      raise ChromiumOSTransferError(
+          'Could not verify if %s was staged at %s. Received exception: %s' %
+          (payload_name, payload_url, e))
+
+  def CheckPayloads(self):
+    """Verify that all required payloads are staged on staging server."""
+    logging.debug('Checking if payloads have been staged on server %s...',
+                  self._staging_server)
+
+    if self._transfer_rootfs_update:
+      self._CheckPayloads(self._payload_name)
+      self._CheckPayloads(GetPayloadPropertiesFileName(self._payload_name))
+
+    if self._transfer_stateful_update:
+      self._CheckPayloads(STATEFUL_FILENAME)
+
+  def _GetStagedUrl(self, staged_filename, build_id=None):
+    """Returns a valid url to check availability of staged files.
+
+    Args:
+      staged_filename: Name of the staged file.
+      build_id: This is the path at which the needed file can be found. It
+        is usually of the format <board_name>-release/R79-12345.6.0. By default,
+        the path is set to be None.
+
+    Returns:
+      A URL in the format:
+        http://<ip>:<port>/static/<board>-release/<version>/<staged_filename>
+    """
+    # Formulate the download URL out of components.
+    url = urllib.parse.urljoin(self._staging_server, 'static/')
+    if build_id:
+      # Add slash at the end of build_id if necessary.
+      if not build_id.endswith('/'):
+        build_id = build_id + '/'
+      url = urllib.parse.urljoin(url, build_id)
+    return urllib.parse.urljoin(url, staged_filename)
+
+  def _GetCurlCmdForPayloadDownload(self, payload_dir, payload_filename,
+                                    build_id=None):
+    """Returns a valid curl command to download payloads into device tmp dir.
+
+    Args:
+      payload_dir: Path to the payload directory on the device.
+      payload_filename: Name of the file by which the downloaded payload should
+        be saved. This is assumed to be the same as the name of the payload.
+      build_id: This is the path at which the needed payload can be found. It
+        is usually of the format <board_name>-release/R79-12345.6.0. By default,
+        the path is set to None.
+
+    Returns:
+      A fully formed curl command in the format:
+        ['curl', '-o', '<path where payload should be saved>',
+         '<payload download URL>']
+    """
+    return ['curl', '-o', os.path.join(payload_dir, payload_filename),
+            self._GetStagedUrl(payload_filename, build_id)]
+
+  def _TransferUpdateUtilsPackage(self):
+    """Transfer update-utils package to work directory of the remote device.
+
+    The update-utils package will be transferred to the device from the
+    staging server via curl.
+    """
+    logging.notice('Copying update script to device...')
+    source_dir = os.path.join(self._device.work_dir, 'src')
+    self._EnsureDeviceDirectory(source_dir)
+
+    self._device.run(self._GetCurlCmdForPayloadDownload(
+        payload_dir=source_dir,
+        payload_filename=nebraska_wrapper.NEBRASKA_FILENAME))
+
+    # Make sure the device.work_dir exists after any installation and reboot.
+    self._EnsureDeviceDirectory(self._device.work_dir)
+
+  def _TransferStatefulUpdate(self):
+    """Transfer files for stateful update.
+
+    The stateful update bin and the corresponding payloads are copied to the
+    target remote device for stateful update from the staging server via curl.
+    """
+    self._EnsureDeviceDirectory(self._device_payload_dir)
+
+    # TODO(crbug.com/1024639): Another way to make the payloads available is
+    # to make update_engine download it directly from the staging_server. This
+    # will avoid a disk copy but has the potential to be harder to debug if
+    # update engine does not report the error clearly.
+
+    logging.notice('Copying target stateful payload to device...')
+    self._device.run(self._GetCurlCmdForPayloadDownload(
+        payload_dir=self._device.work_dir, build_id=self._payload_dir,
+        payload_filename=STATEFUL_FILENAME))
+
+  def _TransferRootfsUpdate(self):
+    """Transfer files for rootfs update.
+
+    Copy the update payload to the remote device for rootfs update from the
+    staging server via curl.
+    """
+    self._EnsureDeviceDirectory(self._device_payload_dir)
+
+    logging.notice('Copying rootfs payload to device...')
+
+    # TODO(crbug.com/1024639): Another way to make the payloads available is
+    # to make update_engine download it directly from the staging_server. This
+    # will avoid a disk copy but has the potential to be harder to debug if
+    # update engine does not report the error clearly.
+
+    self._device.run(self._GetCurlCmdForPayloadDownload(
+        payload_dir=self._device_payload_dir, build_id=self._payload_dir,
+        payload_filename=self._payload_name))
+
+    self._device.CopyToWorkDir(src=self._local_payload_props_path,
+                               dest=self.PAYLOAD_DIR_NAME,
+                               mode=self._payload_mode,
+                               log_output=True, **self._cmd_kwargs)
+
+  def GetPayloadPropsFile(self):
+    """Downloads the PayloadProperties file onto the drone.
+
+    The payload properties file may need to be updated in
+    auto_updater.ResolveAppIsMismatchIfAny(). Download the file from where it
+    has been staged on the staging server into the tempdir of the drone, so
+    that the file is available locally for any updates.
+
+    Returns:
+      Path to the local copy of the payload properties file.
+    """
+    if self._local_payload_props_path is None:
+      payload_props_filename = GetPayloadPropertiesFileName(self._payload_name)
+      payload_props_path = os.path.join(self._tempdir, payload_props_filename)
+
+      # Get command to retrieve contents of the properties file.
+      cmd = ['curl',
+             self._GetStagedUrl(payload_props_filename, self._payload_dir)]
+      try:
+        result = self._RemoteDevserverCall(cmd, stdout=True)
+        json.loads(result.output)
+        osutils.WriteFile(payload_props_path, result.output, 'wb',
+                          makedirs=True)
+      except cros_build_lib.RunCommandError as e:
+        raise ChromiumOSTransferError(
+            'Unable to get payload properties file by running %s due to '
+            'exception: %s.' % (' '.join(cmd), e))
+      except ValueError:
+        raise ChromiumOSTransferError(
+            'Could not create %s as %s not valid json.' %
+            (payload_props_path, result.output))
+
+      self._local_payload_props_path = payload_props_path
+    return self._local_payload_props_path
+
+  def _GetPayloadSize(self):
+    """Returns the size of the payload by running a curl -I command.
+
+    Returns:
+      Payload size in bytes.
+    """
+    payload_url = self._GetStagedUrl(staged_filename=self._payload_name,
+                                     build_id=self._payload_dir)
+    cmd = ['curl', '-I', payload_url, '--fail']
+    try:
+      proc = self._RemoteDevserverCall(cmd, stdout=True)
+    except cros_build_lib.RunCommandError as e:
+      raise ChromiumOSTransferError(
+          'Unable to get payload size by running command %s due to exception: '
+          '%s.' % (' '.join(cmd), e))
+
+    pattern = re.compile(r'Content-Length: [0-9]+', re.I)
+    match = pattern.findall(proc.output)
+    if not match:
+      raise ChromiumOSTransferError('Could not get payload size from output: '
+                                    '%s ' % proc.output)
+    return int(match[0].split()[1].strip())
+
+  def GetPayloadProps(self):
+    """Gets image_version from the payload_dir name and gets payload size.
+
+    The payload_dir must be in the format <board>/Rxx-12345.0.0 for a complete
+    match; else a ValueError will be raised.
+
+    Returns:
+      Dict - See parent class's function for full details.
+    """
+    values = {'size': self._GetPayloadSize()}
+    payload_format = self._GetPayloadFormat()
+    payload_pattern = self._GetPayloadPattern()
+    m = re.match(payload_pattern, payload_format)
+    if not m:
+      raise ValueError('Regular expression %r did not match the expected '
+                       'payload format %s' % (payload_pattern, payload_format))
+    values.update(m.groupdict())
+    return values
+
+
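+# A minimal sketch (illustrative only, never called) of how GetPayloadProps()
+# above extracts image_version; the payload directory name is an assumption.
+def _example_lab_transfer_payload_props():
+  """Show what LabTransfer._GetPayloadPattern() pulls out of a payload dir."""
+  payload_dir = 'eve-release/R79-12345.6.0'
+  m = re.match(r'.*/(R[0-9]+-)(?P<image_version>.+)', payload_dir)
+  # m.groupdict() == {'image_version': '12345.6.0'}
+  return m.groupdict()
+
+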
+class LabEndToEndPayloadTransfer(LabTransfer):
+  """Abstracts logic that transfers files from staging server to the DUT.
+
+  TODO(crbug.com/1061570): AutoUpdate_endToEnd tests stage their payloads in a
+  different location on the devserver in comparison to the provision_AutoUpdate
+  test. Since we are removing the use of the cros_au RPC (see crbug.com/1049708
+  and go/devserver-deprecation) from the EndToEnd tests, it is necessary to
+  extend LabTransfer class to support this new payload staging location.
+  Ideally, the URL at which the payload is staged should be abstracted from the
+  actual transfer of payloads.
+  """
+
+  def _GetPayloadFormat(self):
+    """Gets the payload format that should be evaluated.
+
+    Returns:
+      The payload name as a string.
+    """
+    return self._payload_name
+
+  def _GetPayloadPattern(self):
+    """The regex pattern that the payload format must match.
+
+    Returns:
+      Regular expression.
+    """
+    return _PAYLOAD_PATTERN
+
+  def _GetCurlCmdForPayloadDownload(self, payload_dir, payload_filename,
+                                    build_id=None):
+    """Returns a valid curl command to download payloads into device tmp dir.
+
+    Args:
+      payload_dir: Path to the payload directory on the device.
+      payload_filename: Name of the file by which the downloaded payload should
+        be saved. This is assumed to be the same as the name of the payload.
+        If the payload_filename is in the format payloads/<file_name>, the
+        leading 'payloads/' is stripped before saving, since the files must be
+        saved in specific directories for their subsequent installation.
+        Keeping 'payloads/' at the beginning of payload_filename would add an
+        extra directory that breaks the installation.
+      build_id: This is the path at which the needed payload can be found. It
+        is usually of the format <board_name>-release/R79-12345.6.0. By default,
+        the path is set to None.
+
+    Returns:
+      A fully formed curl command in the format:
+      ['curl', '-o', '<path where payload should be saved>',
+      '<payload download URL>']
+    """
+    saved_filename = payload_filename
+    if saved_filename.startswith('payloads/'):
+      saved_filename = '/'.join(saved_filename.split('/')[1:])
+    cmd = ['curl', '-o', os.path.join(payload_dir, saved_filename),
+           self._GetStagedUrl(payload_filename, build_id)]
+    return cmd
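+
+
+# A minimal sketch (illustrative only, never called) of the filename handling
+# above; the payload name and device directory are assumptions.
+def _example_end_to_end_curl_cmd():
+  """Show how a 'payloads/...' name is flattened before the curl download."""
+  payload_filename = 'payloads/chromeos_R79-12345.6.0_eve_full_dev.bin'
+  saved_filename = payload_filename
+  if saved_filename.startswith('payloads/'):
+    saved_filename = '/'.join(saved_filename.split('/')[1:])
+  # The file is saved without the leading 'payloads/' directory:
+  #   /usr/local/tmp/chromeos_R79-12345.6.0_eve_full_dev.bin
+  return os.path.join('/usr/local/tmp', saved_filename)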
diff --git a/utils/frozen_chromite/lib/buildbot_annotations.py b/utils/frozen_chromite/lib/buildbot_annotations.py
new file mode 100644
index 0000000..079f5ab
--- /dev/null
+++ b/utils/frozen_chromite/lib/buildbot_annotations.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions and classes for formatting buildbot stage annotations."""
+
+from __future__ import print_function
+
+import abc
+import itertools
+import json
+
+import six
+
+
+class Annotation(object):
+  """Formatted annotation for buildbot."""
+
+  def __init__(self, name, args):
+    """Initialize instance.
+
+    Args:
+      name: Annotation name.
+      args: A sequence of string arguments.
+    """
+    self.name = name
+    self.args = args
+
+  def __str__(self):
+    inner_text = '@'.join(
+        _EscapeArgText(text)
+        for text in itertools.chain([self.name], self.args)
+    )
+    return '@@@%s@@@' % (inner_text,)
+
+  @property
+  def human_friendly(self):
+    """Human-friendly format."""
+    if self.args:
+      return '%s: %s' % (self.name, '; '.join(self.args))
+    else:
+      return self.name
+
+
+@six.add_metaclass(abc.ABCMeta)
+class _NamedAnnotation(Annotation):
+  """Abstract subclass for creating named annotations.
+
+  Concrete subclasses should define the ANNOTATION_NAME class attribute.
+  """
+
+  def __init__(self, *args):
+    super(_NamedAnnotation, self).__init__(self.ANNOTATION_NAME, args)
+
+  @abc.abstractproperty
+  def ANNOTATION_NAME(self):
+    raise NotImplementedError()
+
+
+class StepLink(_NamedAnnotation):
+  """STEP_LINK annotation."""
+  ANNOTATION_NAME = 'STEP_LINK'
+
+  # Some callers pass in text/url by kwarg.  We leave the full signature here
+  # so the API is a bit cleaner/more obvious.
+  # pylint: disable=useless-super-delegation
+  def __init__(self, text, url):
+    super(StepLink, self).__init__(text, url)
+
+
+class StepText(_NamedAnnotation):
+  """STEP_TEXT annotation."""
+  ANNOTATION_NAME = 'STEP_TEXT'
+
+
+class StepWarnings(_NamedAnnotation):
+  """STEP_WARNINGS annotation."""
+  ANNOTATION_NAME = 'STEP_WARNINGS'
+
+
+class StepFailure(_NamedAnnotation):
+  """STEP_FAILURE annotation."""
+  ANNOTATION_NAME = 'STEP_FAILURE'
+
+
+class BuildStep(_NamedAnnotation):
+  """BUILD_STEP annotation."""
+  ANNOTATION_NAME = 'BUILD_STEP'
+
+
+class SetBuildProperty(_NamedAnnotation):
+  """SET_BUILD_PROPERTY annotation."""
+  ANNOTATION_NAME = 'SET_BUILD_PROPERTY'
+
+  def __init__(self, name, value):
+    super(SetBuildProperty, self).__init__(name, json.dumps(value))
+
+
+class SetEmailNotifyProperty(_NamedAnnotation):
+  """SET_BUILD_PROPERTY annotation for email_notify."""
+  ANNOTATION_NAME = 'SET_BUILD_PROPERTY'
+
+  def __init__(self, name, value):
+    super(SetEmailNotifyProperty, self).__init__(name, json.dumps(value))
+
+  def __str__(self):
+    inner_text = '@'.join(
+        text for text in itertools.chain([self.name], self.args))
+    return '@@@%s@@@' % (inner_text)
+
+
+def _EscapeArgText(text):
+  """Escape annotation argument text.
+
+  Args:
+    text: String to escape.
+  """
+  return text.replace('@', '-AT-')
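+
+
+# A minimal sketch (illustrative only, never called) of the annotation
+# formatting above; the link text and URL are assumptions.
+def _example_annotation_format():
+  """Show the '@@@...@@@' wire format and the human-friendly form."""
+  link = StepLink('build logs', 'https://example.com/logs')
+  # str(link) == '@@@STEP_LINK@build logs@https://example.com/logs@@@'
+  # link.human_friendly == 'STEP_LINK: build logs; https://example.com/logs'
+  return str(link), link.human_friendly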
diff --git a/utils/frozen_chromite/lib/cache.py b/utils/frozen_chromite/lib/cache.py
new file mode 100644
index 0000000..a34175e
--- /dev/null
+++ b/utils/frozen_chromite/lib/cache.py
@@ -0,0 +1,375 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Contains on-disk caching functionality."""
+
+from __future__ import print_function
+
+import datetime
+import errno
+import os
+import shutil
+import tempfile
+
+from six.moves import urllib
+
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import locking
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+
+
+# pylint: disable=protected-access
+
+
+def EntryLock(f):
+  """Decorator that provides monitor access control."""
+
+  def new_f(self, *args, **kwargs):
+    # Ensure we don't have a read lock before potentially blocking while trying
+    # to access the monitor.
+    if self.read_locked:
+      raise AssertionError(
+          'Cannot call %s while holding a read lock.' % f.__name__)
+
+    with self._entry_lock:
+      self._entry_lock.write_lock()
+      return f(self, *args, **kwargs)
+  return new_f
+
+
+def WriteLock(f):
+  """Decorator that takes a write lock."""
+
+  def new_f(self, *args, **kwargs):
+    with self._lock.write_lock():
+      return f(self, *args, **kwargs)
+  return new_f
+
+
+class CacheReference(object):
+  """Encapsulates operations on a cache key reference.
+
+  CacheReferences are returned by the DiskCache.Lookup() function.  They are
+  used to read from and insert into the cache.
+
+  A typical example of using a CacheReference:
+
+  @contextlib.contextmanager
+  def FetchFromCache():
+    with cache.Lookup(key) as ref:
+       # If entry doesn't exist in cache already, generate it ourselves, and
+       # insert it into the cache, acquiring a read lock on it in the process.
+       # If the entry does exist, we grab a read lock on it.
+      if not ref.Exists(lock=True):
+        path = PrepareItem()
+        ref.SetDefault(path, lock=True)
+
+      # yield the path to the cached entry to consuming code.
+      yield ref.path
+  """
+
+  def __init__(self, cache, key):
+    self._cache = cache
+    self.key = key
+    self.acquired = False
+    self.read_locked = False
+    self._lock = cache._LockForKey(key)
+    self._entry_lock = cache._LockForKey(key, suffix='.entry_lock')
+
+  @property
+  def path(self):
+    """Returns on-disk path to the cached item."""
+    return self._cache.GetKeyPath(self.key)
+
+  def Acquire(self):
+    """Prepare the cache reference for operation.
+
+    This must be called (either explicitly or by entering a 'with' context)
+    before calling any methods that acquire locks or mutate the reference.
+    """
+    if self.acquired:
+      raise AssertionError(
+          'Attempting to acquire an already acquired reference.')
+
+    self.acquired = True
+    self._lock.__enter__()
+
+  def Release(self):
+    """Release the cache reference.  Causes any held locks to be released."""
+    if not self.acquired:
+      raise AssertionError(
+          'Attempting to release an unacquired reference.')
+
+    self.acquired = False
+    self._lock.__exit__(None, None, None)
+    self.read_locked = False
+
+  def __enter__(self):
+    self.Acquire()
+    return self
+
+  def __exit__(self, *args):
+    self.Release()
+
+  def _ReadLock(self):
+    self._lock.read_lock()
+    self.read_locked = True
+
+  @WriteLock
+  def _Assign(self, path):
+    self._cache._Insert(self.key, path)
+
+  @WriteLock
+  def _AssignText(self, text):
+    self._cache._InsertText(self.key, text)
+
+  @WriteLock
+  def _Remove(self):
+    self._cache._Remove(self.key)
+    osutils.SafeUnlink(self._lock.path)
+    osutils.SafeUnlink(self._entry_lock.path)
+
+  def _Exists(self):
+    return self._cache._KeyExists(self.key)
+
+  @EntryLock
+  def Assign(self, path):
+    """Insert a file or a directory into the cache at the referenced key."""
+    self._Assign(path)
+
+  @EntryLock
+  def AssignText(self, text):
+    """Create a file containing |text| and assign it to the key.
+
+    Args:
+      text: Can be a string or an iterable.
+    """
+    self._AssignText(text)
+
+  @EntryLock
+  def Remove(self):
+    """Removes the entry from the cache."""
+    self._Remove()
+
+  @EntryLock
+  def Exists(self, lock=False):
+    """Tests for existence of entry.
+
+    Args:
+      lock: If the entry exists, acquire and maintain a read lock on it.
+    """
+    if self._Exists():
+      if lock:
+        self._ReadLock()
+      return True
+    return False
+
+  @EntryLock
+  def SetDefault(self, default_path, lock=False):
+    """Assigns default_path if the entry doesn't exist.
+
+    Args:
+      default_path: The path to assign if the entry doesn't exist.
+      lock: Acquire and maintain a read lock on the entry.
+    """
+    if not self._Exists():
+      self._Assign(default_path)
+    if lock:
+      self._ReadLock()
+
+
+class DiskCache(object):
+  """Locked file system cache keyed by tuples.
+
+  Key entries can be files or directories.  Access to the cache is provided
+  through CacheReferences, which are retrieved by using the cache Lookup()
+  method.
+  """
+  _STAGING_DIR = 'staging'
+
+  def __init__(self, cache_dir, cache_user=None, lock_suffix='.lock'):
+    self._cache_dir = cache_dir
+    self._cache_user = cache_user
+    self._lock_suffix = lock_suffix
+    self.staging_dir = os.path.join(cache_dir, self._STAGING_DIR)
+
+    osutils.SafeMakedirsNonRoot(self._cache_dir, user=self._cache_user)
+    osutils.SafeMakedirsNonRoot(self.staging_dir, user=self._cache_user)
+
+  def _KeyExists(self, key):
+    return os.path.lexists(self.GetKeyPath(key))
+
+  def GetKeyPath(self, key):
+    """Get the on-disk path of a key."""
+    return os.path.join(self._cache_dir, '+'.join(key))
+
+  def _LockForKey(self, key, suffix=None):
+    """Returns an unacquired lock associated with a key."""
+    suffix = suffix or self._lock_suffix
+    key_path = self.GetKeyPath(key)
+    osutils.SafeMakedirsNonRoot(os.path.dirname(key_path),
+                                user=self._cache_user)
+    lock_path = os.path.join(self._cache_dir, os.path.dirname(key_path),
+                             os.path.basename(key_path) + suffix)
+    return locking.FileLock(lock_path)
+
+  def _TempDirContext(self):
+    return osutils.TempDir(base_dir=self.staging_dir)
+
+  def _Insert(self, key, path):
+    """Insert a file or a directory into the cache at a given key."""
+    self._Remove(key)
+    key_path = self.GetKeyPath(key)
+    osutils.SafeMakedirsNonRoot(os.path.dirname(key_path),
+                                user=self._cache_user)
+    shutil.move(path, key_path)
+
+  def _InsertText(self, key, text):
+    """Inserts a file containing |text| into the cache."""
+    with self._TempDirContext() as tempdir:
+      file_path = os.path.join(tempdir, 'tempfile')
+      osutils.WriteFile(file_path, text)
+      self._Insert(key, file_path)
+
+  def _Remove(self, key):
+    """Remove a key from the cache."""
+    if self._KeyExists(key):
+      with self._TempDirContext() as tempdir:
+        shutil.move(self.GetKeyPath(key), tempdir)
+
+  def GetKey(self, path):
+    """Returns the key for an item's path in the cache."""
+    if self._cache_dir in path:
+      path = os.path.relpath(path, self._cache_dir)
+    return tuple(path.split('+'))
+
+  def ListKeys(self):
+    """Returns a list of keys for every item present in the cache."""
+    keys = []
+    for root, dirs, files in os.walk(self._cache_dir):
+      for f in dirs + files:
+        key_path = os.path.join(root, f)
+        if os.path.exists(key_path + self._lock_suffix):
+          # Test for the presence of the key's lock file to determine if this
+          # is the root key path, or some file nested within a key's dir.
+          keys.append(self.GetKey(key_path))
+    return keys
+
+  def Lookup(self, key):
+    """Get a reference to a given key."""
+    return CacheReference(self, key)
+
+  def DeleteStale(self, max_age):
+    """Removes any item from the cache that was modified after a given lifetime.
+
+    Args:
+      max_age: An instance of datetime.timedelta. Any item not modified within
+          this amount of time will be removed.
+
+    Returns:
+      List of keys removed.
+    """
+    if not isinstance(max_age, datetime.timedelta):
+      raise TypeError('max_age must be an instance of datetime.timedelta.')
+    keys_removed = []
+    for key in self.ListKeys():
+      path = self.GetKeyPath(key)
+      mtime = max(os.path.getmtime(path), os.path.getctime(path))
+      time_since_last_modify = (
+          datetime.datetime.now() - datetime.datetime.fromtimestamp(mtime))
+      if time_since_last_modify > max_age:
+        self.Lookup(key).Remove()
+        keys_removed.append(key)
+    return keys_removed
+
+
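+# A minimal sketch (illustrative only, never called) of the Lookup() workflow
+# described in CacheReference above; the cache path and key are assumptions.
+def _example_disk_cache_usage():
+  """Show how a key maps to an on-disk path and how an entry is populated."""
+  disk_cache = DiskCache('/tmp/example-cache')
+  key = ('gsutil', '4.57')
+  # GetKeyPath joins the key tuple with '+': /tmp/example-cache/gsutil+4.57
+  path = disk_cache.GetKeyPath(key)
+  with disk_cache.Lookup(key) as ref:
+    # Populate the entry only if it is missing, then hold a read lock on it.
+    if not ref.Exists(lock=True):
+      ref.AssignText('placeholder contents')
+  return path
+
+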
+class RemoteCache(DiskCache):
+  """Supports caching of remote objects via URI."""
+
+  def _Fetch(self, url, local_path):
+    """Fetch a remote file."""
+    # We have to nest the import because gs.GSContext uses us to cache its own
+    # gsutil tarball.  We know we won't get into a recursive loop though as it
+    # only fetches files via non-gs URIs.
+    from autotest_lib.utils.frozen_chromite.lib import gs
+
+    if gs.PathIsGs(url):
+      ctx = gs.GSContext()
+      ctx.Copy(url, local_path)
+    else:
+      # Note: unittests assume local_path is at the end.
+      retry_util.RunCurl(['--fail', url, '-o', local_path],
+                         debug_level=logging.DEBUG, capture_output=True)
+
+  def _Insert(self, key, url):  # pylint: disable=arguments-differ
+    """Insert a remote file into the cache."""
+    o = urllib.parse.urlparse(url)
+    if o.scheme in ('file', ''):
+      DiskCache._Insert(self, key, o.path)
+      return
+
+    with tempfile.NamedTemporaryFile(dir=self.staging_dir,
+                                     delete=False) as local_path:
+      self._Fetch(url, local_path.name)
+      DiskCache._Insert(self, key, local_path.name)
+
+
+def Untar(path, cwd, sudo=False):
+  """Untar a tarball."""
+  functor = cros_build_lib.sudo_run if sudo else cros_build_lib.run
+  comp = cros_build_lib.CompressionExtToType(path)
+  cmd = ['tar']
+  if comp != cros_build_lib.COMP_NONE:
+    cmd += ['-I', cros_build_lib.FindCompressor(comp)]
+  functor(cmd + ['-xpf', path], cwd=cwd, debug_level=logging.DEBUG, quiet=True)
+
+
+class TarballCache(RemoteCache):
+  """Supports caching of extracted tarball contents."""
+
+  def _Insert(self, key, tarball_path):  # pylint: disable=arguments-differ
+    """Insert a tarball and its extracted contents into the cache.
+
+    Download the tarball first if a URL is provided as tarball_path.
+    """
+    with osutils.TempDir(prefix='tarball-cache',
+                         base_dir=self.staging_dir) as tempdir:
+
+      o = urllib.parse.urlsplit(tarball_path)
+      if o.scheme == 'file':
+        tarball_path = o.path
+      elif o.scheme:
+        url = tarball_path
+        tarball_path = os.path.join(tempdir, os.path.basename(o.path))
+        self._Fetch(url, tarball_path)
+
+      extract_path = os.path.join(tempdir, 'extract')
+      os.mkdir(extract_path)
+      Untar(tarball_path, extract_path)
+      DiskCache._Insert(self, key, extract_path)
+
+  def _KeyExists(self, key):
+    """Specialized DiskCache._KeyExits that ignores empty directories.
+
+    The normal _KeyExists just checks to see if the key path exists in the cache
+    directory. Many tests mock out run then fetch a tarball. The mock
+    blocks untarring into it. This leaves behind an empty dir which blocks
+    future untarring in non-test scripts.
+
+    See crbug.com/468838
+    """
+    # Wipe out empty directories before testing for existence.
+    key_path = self.GetKeyPath(key)
+
+    try:
+      os.rmdir(key_path)
+    except OSError as ex:
+      if ex.errno not in (errno.ENOTEMPTY, errno.ENOENT):
+        raise
+
+    return os.path.exists(key_path)
diff --git a/utils/frozen_chromite/lib/cipd.py b/utils/frozen_chromite/lib/cipd.py
new file mode 100644
index 0000000..9ab375f
--- /dev/null
+++ b/utils/frozen_chromite/lib/cipd.py
@@ -0,0 +1,266 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module to download and run the CIPD client.
+
+CIPD is the Chrome Infra Package Deployer, a simple method of resolving a
+package/version into a GStorage link and installing it.
+"""
+
+from __future__ import print_function
+
+import hashlib
+import json
+import os
+import pprint
+import tempfile
+
+import httplib2
+from six.moves import urllib
+
+import autotest_lib.utils.frozen_chromite.lib.cros_logging as log
+from autotest_lib.utils.frozen_chromite.lib import cache
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import path_util
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.utils import memoize
+
+# pylint: disable=line-too-long
+# CIPD client to download.
+#
+# This is version "git_revision:db7a486094873e3944b8e27ab5b23a3ae3c401e7".
+#
+# To switch to another version:
+#   1. Find it in CIPD Web UI, e.g.
+#      https://chrome-infra-packages.appspot.com/p/infra/tools/cipd/linux-amd64/+/latest
+#   2. Look up SHA256 there.
+# pylint: enable=line-too-long
+CIPD_CLIENT_PACKAGE = 'infra/tools/cipd/linux-amd64'
+CIPD_CLIENT_SHA256 = (
+    'ea6b7547ddd316f32fd9974f598949c3f8f22f6beb8c260370242d0d84825162')
+
+CHROME_INFRA_PACKAGES_API_BASE = (
+    'https://chrome-infra-packages.appspot.com/prpc/cipd.Repository/')
+
+
+class Error(Exception):
+  """Raised on fatal errors."""
+
+
+def _ChromeInfraRequest(method, request):
+  """Makes a request to the Chrome Infra Packages API with httplib2.
+
+  Args:
+    method: Name of RPC method to call.
+    request: RPC request body.
+
+  Returns:
+    Deserialized RPC response body.
+  """
+  resp, body = httplib2.Http().request(
+      uri=CHROME_INFRA_PACKAGES_API_BASE+method,
+      method='POST',
+      headers={
+          'Accept': 'application/json',
+          'Content-Type': 'application/json',
+          'User-Agent': 'chromite',
+      },
+      body=json.dumps(request))
+  if resp.status != 200:
+    raise Error('Got HTTP %d from CIPD %r: %s' % (resp.status, method, body))
+  try:
+    return json.loads(body.lstrip(b")]}'\n"))
+  except ValueError:
+    raise Error('Bad response from CIPD server:\n%s' % (body,))
+
+
+def _DownloadCIPD(instance_sha256):
+  """Finds the CIPD download link and requests the binary.
+
+  Args:
+    instance_sha256: The version of CIPD client to download.
+
+  Returns:
+    The CIPD binary as a string.
+  """
+  # Grab the signed URL to fetch the client binary from.
+  resp = _ChromeInfraRequest('DescribeClient', {
+      'package': CIPD_CLIENT_PACKAGE,
+      'instance': {
+          'hashAlgo': 'SHA256',
+          'hexDigest': instance_sha256,
+      },
+  })
+  if 'clientBinary' not in resp:
+    log.error(
+        'Error requesting the link to download CIPD from. Got:\n%s',
+        pprint.pformat(resp))
+    raise Error('Failed to bootstrap CIPD client')
+
+  # Download the actual binary.
+  http = httplib2.Http(cache=None)
+  response, binary = http.request(uri=resp['clientBinary']['signedUrl'])
+  if response.status != 200:
+    raise Error('Got a %d response from Google Storage.' % response.status)
+
+  # Check SHA256 matches what server expects.
+  digest = hashlib.sha256(binary).hexdigest()
+  for alias in resp['clientRefAliases']:
+    if alias['hashAlgo'] == 'SHA256':
+      if digest != alias['hexDigest']:
+        raise Error(
+            'Unexpected CIPD client SHA256: got %s, want %s' %
+            (digest, alias['hexDigest']))
+      break
+  else:
+    raise Error("CIPD server didn't provide expected SHA256")
+
+  return binary
+
+
+class CipdCache(cache.RemoteCache):
+  """Supports caching of the CIPD download."""
+  def _Fetch(self, url, local_path):
+    instance_sha256 = urllib.parse.urlparse(url).netloc
+    binary = _DownloadCIPD(instance_sha256)
+    log.info('Fetched CIPD package %s:%s', CIPD_CLIENT_PACKAGE, instance_sha256)
+    osutils.WriteFile(local_path, binary, mode='wb')
+    os.chmod(local_path, 0o755)
+
+
+def GetCIPDFromCache():
+  """Checks the cache, downloading CIPD if it is missing.
+
+  Returns:
+    Path to the CIPD binary.
+  """
+  cache_dir = os.path.join(path_util.GetCacheDir(), 'cipd')
+  bin_cache = CipdCache(cache_dir)
+  key = (CIPD_CLIENT_SHA256,)
+  ref = bin_cache.Lookup(key)
+  ref.SetDefault('cipd://' + CIPD_CLIENT_SHA256)
+  return ref.path
+
+
+def GetInstanceID(cipd_path, package, version, service_account_json=None):
+  """Get the latest instance ID for ref latest.
+
+  Args:
+    cipd_path: The path to a cipd executable. GetCIPDFromCache can give this.
+    package: A string package name.
+    version: A string version of package.
+    service_account_json: The path of the service account credentials.
+
+  Returns:
+    A string instance ID.
+  """
+  service_account_flag = []
+  if service_account_json:
+    service_account_flag = ['-service-account-json', service_account_json]
+
+  result = cros_build_lib.run(
+      [cipd_path, 'resolve', package, '-version', version] +
+      service_account_flag, capture_output=True, encoding='utf-8')
+  # An example output of resolve is like:
+  #   Packages:\n package:instance_id
+  return result.output.splitlines()[-1].split(':')[-1]
+
+
+@memoize.Memoize
+def InstallPackage(cipd_path, package, instance_id, destination,
+                   service_account_json=None):
+  """Installs a package at a given destination using cipd.
+
+  Args:
+    cipd_path: The path to a cipd executable. GetCIPDFromCache can give this.
+    package: A package name.
+    instance_id: The version of the package to install.
+    destination: The folder to install the package under.
+    service_account_json: The path of the service account credentials.
+
+  Returns:
+    The path of the package.
+  """
+  destination = os.path.join(destination, package)
+
+  service_account_flag = []
+  if service_account_json:
+    service_account_flag = ['-service-account-json', service_account_json]
+
+  with tempfile.NamedTemporaryFile() as f:
+    f.write(('%s %s' % (package, instance_id)).encode('utf-8'))
+    f.flush()
+
+    cros_build_lib.run(
+        [cipd_path, 'ensure', '-root', destination, '-list', f.name]
+        + service_account_flag,
+        capture_output=True)
+
+  return destination
+
+
+def CreatePackage(cipd_path, package, in_dir, tags, refs,
+                  cred_path=None):
+  """Create (build and register) a package using cipd.
+
+  Args:
+    cipd_path: The path to a cipd executable. GetCIPDFromCache can give this.
+    package: A package name.
+    in_dir: The directory to create the package from.
+    tags: A mapping of tags to apply to the package.
+    refs: An Iterable of refs to apply to the package.
+    cred_path: The path of the service account credentials.
+  """
+  args = [
+      cipd_path, 'create',
+      '-name', package,
+      '-in', in_dir,
+  ]
+  for key, value in tags.items():
+    args.extend(['-tag', '%s:%s' % (key, value)])
+  for ref in refs:
+    args.extend(['-ref', ref])
+  if cred_path:
+    args.extend(['-service-account-json', cred_path])
+
+  cros_build_lib.run(args, capture_output=True)
+
+
+def BuildPackage(cipd_path, package, in_dir, outfile):
+  """Build a package using cipd.
+
+  Args:
+    cipd_path: The path to a cipd executable. GetCIPDFromCache can give this.
+    package: A package name.
+    in_dir: The directory to create the package from.
+    outfile: Output file.  Should have extension .cipd
+  """
+  args = [
+      cipd_path, 'pkg-build',
+      '-name', package,
+      '-in', in_dir,
+      '-out', outfile,
+  ]
+  cros_build_lib.run(args, capture_output=True)
+
+
+def RegisterPackage(cipd_path, package_file, tags, refs, cred_path=None):
+  """Register and upload a package using cipd.
+
+  Args:
+    cipd_path: The path to a cipd executable. GetCIPDFromCache can give this.
+    package_file: The path to a .cipd package file.
+    tags: A mapping of tags to apply to the package.
+    refs: An Iterable of refs to apply to the package.
+    cred_path: The path of the service account credentials.
+  """
+  args = [cipd_path, 'pkg-register', package_file]
+  for key, value in tags.items():
+    args.extend(['-tag', '%s:%s' % (key, value)])
+  for ref in refs:
+    args.extend(['-ref', ref])
+  if cred_path:
+    args.extend(['-service-account-json', cred_path])
+  cros_build_lib.run(args, capture_output=True)
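+
+
+# A minimal sketch (illustrative only, never called) of the typical flow for
+# the helpers above; the package name and destination dir are assumptions.
+def _example_install_cipd_package():
+  """Fetch the cipd client, resolve a version, and install a package."""
+  cipd_path = GetCIPDFromCache()
+  package = 'infra/tools/luci/logdog/butler/linux-amd64'
+  instance_id = GetInstanceID(cipd_path, package, 'latest')
+  # Installs under /tmp/cipd-example/<package> and returns that path.
+  return InstallPackage(cipd_path, package, instance_id, '/tmp/cipd-example')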
diff --git a/utils/frozen_chromite/lib/commandline.py b/utils/frozen_chromite/lib/commandline.py
new file mode 100644
index 0000000..7292846
--- /dev/null
+++ b/utils/frozen_chromite/lib/commandline.py
@@ -0,0 +1,1064 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Purpose of this module is to hold common script/commandline functionality.
+
+This ranges from optparse, to a basic script wrapper setup (much like
+what is used for chromite.bin.*).
+"""
+
+from __future__ import print_function
+
+import argparse
+import collections
+import datetime
+import functools
+import os
+import optparse  # pylint: disable=deprecated-module
+import signal
+import sys
+
+import six
+from six.moves import urllib
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_collections
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import gs
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import path_util
+from autotest_lib.utils.frozen_chromite.lib import terminal
+from autotest_lib.utils.frozen_chromite.utils import attrs_freezer
+
+
+DEVICE_SCHEME_FILE = 'file'
+DEVICE_SCHEME_SERVO = 'servo'
+DEVICE_SCHEME_SSH = 'ssh'
+DEVICE_SCHEME_USB = 'usb'
+
+
+class ChrootRequiredError(Exception):
+  """Raised when a command must be run in the chroot
+
+  This exception is intended to be caught by code which will restart execution
+  in the chroot. Throwing this exception allows contexts to be exited and
+  general cleanup to happen before we exec an external binary.
+
+  The command to run inside the chroot, and (optionally) special cros_sdk
+  arguments are attached to the exception. Any adjustments to the arguments
+  should be done before raising the exception.
+  """
+  def __init__(self, cmd, chroot_args=None, extra_env=None):
+    """Constructor for ChrootRequiredError.
+
+    Args:
+      cmd: Command line to run inside the chroot as a list of strings.
+      chroot_args: Arguments to pass directly to cros_sdk.
+      extra_env: Environmental variables to set in the chroot.
+    """
+    super(ChrootRequiredError, self).__init__()
+    self.cmd = cmd
+    self.chroot_args = chroot_args
+    self.extra_env = extra_env
+
+
+class ExecRequiredError(Exception):
+  """Raised when a command needs to exec, after cleanup.
+
+  This exception is intended to be caught by code which will exec another
+  command. Throwing this exception allows contexts to be exited and general
+  cleanup to happen before we exec an external binary.
+
+  The command to run is attached to the exception. Any adjustments to the
+  arguments should be done before raising the exception.
+  """
+  def __init__(self, cmd):
+    """Constructor for ExecRequiredError.
+
+    Args:
+      cmd: Command line to run inside the chroot as a list of strings.
+    """
+    super(ExecRequiredError, self).__init__()
+    self.cmd = cmd
+
+
+def AbsolutePath(_option, _opt, value):
+  """Expand paths and make them absolute."""
+  return osutils.ExpandPath(value)
+
+
+def NormalizeGSPath(value):
+  """Normalize GS paths."""
+  url = gs.CanonicalizeURL(value, strict=True)
+  return '%s%s' % (gs.BASE_GS_URL, os.path.normpath(url[len(gs.BASE_GS_URL):]))
+
+
+def NormalizeLocalOrGSPath(value):
+  """Normalize a local or GS path."""
+  ptype = 'gs_path' if gs.PathIsGs(value) else 'path'
+  return VALID_TYPES[ptype](value)
+
+
+def NormalizeAbUrl(value):
+  """Normalize an androidbuild URL."""
+  if not value.startswith('ab://'):
+    # Give a helpful error message about the format expected.  Putting this
+    # message in the exception is useless because argparse ignores the
+    # exception message and just says the value is invalid.
+    msg = 'Invalid ab:// URL format: [%s].' % value
+    logging.error(msg)
+    raise ValueError(msg)
+
+  # If no errors, just return the unmodified value.
+  return value
+
+
+def ValidateCipdURL(value):
+  """Return plain string."""
+  if not value.startswith('cipd://'):
+    msg = 'Invalid cipd:// URL format: %s' % value
+    logging.error(msg)
+    raise ValueError(msg)
+  return value
+
+
+def ParseBool(value):
+  """Parse bool argument into a bool value.
+
+  For the existing type=bool functionality, the parser uses the built-in bool(x)
+  function to determine the value.  This function will only return false if x
+  is False or omitted.  Even with this type specified, however, arguments that
+  are generated from a command line initially get parsed as a string, and for
+  any string value passed in to bool(x), it will always return True.
+
+  Args:
+    value: String representing a boolean value.
+
+  Returns:
+    True or False.
+  """
+  return cros_build_lib.BooleanShellValue(value, False)
+
+
+def ParseDate(value):
+  """Parse date argument into a datetime.date object.
+
+  Args:
+    value: String representing a single date in "YYYY-MM-DD" format.
+
+  Returns:
+    A datetime.date object.
+  """
+  try:
+    return datetime.datetime.strptime(value, '%Y-%m-%d').date()
+  except ValueError:
+    # Give a helpful error message about the format expected.  Putting this
+    # message in the exception is useless because argparse ignores the
+    # exception message and just says the value is invalid.
+    logging.error('Date is expected to be in format YYYY-MM-DD.')
+    raise
+
+
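+# A minimal sketch (illustrative only, never called) of the bool/date parsers
+# above; the input strings are assumptions.
+def _example_parse_bool_and_date():
+  """Show that 'false' parses to False and that dates must be YYYY-MM-DD."""
+  # ParseBool goes through cros_build_lib.BooleanShellValue, so strings such
+  # as 'false', 'no', and '0' become False rather than truthy strings.
+  flag = ParseBool('false')
+  # ParseDate('2020-02-29') == datetime.date(2020, 2, 29)
+  date = ParseDate('2020-02-29')
+  return flag, date
+
+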
+def NormalizeUri(value):
+  """Normalize a local path or URI."""
+  o = urllib.parse.urlparse(value)
+  if o.scheme == 'file':
+    # Trim off the file:// prefix.
+    return VALID_TYPES['path'](value[7:])
+  elif o.scheme not in ('', 'gs'):
+    o = list(o)
+    o[2] = os.path.normpath(o[2])
+    return urllib.parse.urlunparse(o)
+  else:
+    return NormalizeLocalOrGSPath(value)
+
+
+# A Device object holds information parsed from the command line input:
+#   scheme: DEVICE_SCHEME_SSH, DEVICE_SCHEME_USB, DEVICE_SCHEME_SERVO,
+#     or DEVICE_SCHEME_FILE.
+#   username: String SSH username or None.
+#   hostname: String SSH hostname or None.
+#   port: Int SSH or Servo port or None.
+#   path: String USB/file path or None.
+#   raw: String raw input from the command line.
+#   serial_number: String Servo serial number or None.
+# For now this is a superset of all information for USB, SSH, or file devices.
+# If functionality diverges based on type, it may be useful to split this into
+# separate device classes instead.
+Device = cros_collections.Collection(
+    'Device', scheme=None, username=None, hostname=None, port=None, path=None,
+    raw=None, serial_number=None)
+
+
+class DeviceParser(object):
+  """Parses devices as an argparse argument type.
+
+  In addition to parsing user input, this class will also ensure that only
+  supported device schemes are accepted by the parser. For example,
+  `cros deploy` only makes sense with an SSH device, but `cros flash` can use
+  SSH, USB, or file device schemes.
+
+  If the device input is malformed or the scheme is wrong, an error message will
+  be printed and the program will exit.
+
+  Valid device inputs are:
+    - [ssh://][username@]hostname[:port].
+    - usb://[path].
+    - file://path or /absolute_path.
+    - servo:port[:port] to use a port via dut-control, e.g. servo:port:1234.
+    - servo:serial:serial-number to use the servo's serial number,
+        e.g. servo:serial:641220-00057 servo:serial:C1230024192.
+    - [ssh://]:vm:.
+
+  The last item above is an alias for ssh'ing into a virtual machine on
+  localhost.  It gets translated into 'localhost:9222'.
+
+  Examples:
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+      'ssh_device',
+      type=commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH))
+
+    parser.add_argument(
+      'usb_or_file_device',
+      type=commandline.DeviceParser([commandline.DEVICE_SCHEME_USB,
+                                     commandline.DEVICE_SCHEME_FILE]))
+  """
+
+  def __init__(self, schemes):
+    """Initializes the parser.
+
+    See the class comments for usage examples.
+
+    Args:
+      schemes: A scheme or list of schemes to accept.
+    """
+    self.schemes = ([schemes] if isinstance(schemes, six.string_types)
+                    else schemes)
+    # Provide __name__ for argparse to print on failure, or else it will use
+    # repr() which creates a confusing error message.
+    self.__name__ = type(self).__name__
+
+  def __call__(self, value):
+    """Parses a device input and enforces constraints.
+
+    DeviceParser is an object so that a set of valid schemes can be specified,
+    but argparse expects a parsing function, so we overload __call__() for
+    argparse to use.
+
+    Args:
+      value: String representing a device target. See class comments for
+        valid device input formats.
+
+    Returns:
+      A Device object.
+
+    Raises:
+      ValueError: |value| is not a valid device specifier or doesn't
+        match the supported list of schemes.
+    """
+    try:
+      device = self._ParseDevice(value)
+      self._EnforceConstraints(device, value)
+      return device
+    except ValueError as e:
+      # argparse ignores exception messages, so print the message manually.
+      logging.error(e)
+      raise
+    except Exception as e:
+      logging.error('Internal error while parsing device input: %s', e)
+      raise
+
+  def _EnforceConstraints(self, device, value):
+    """Verifies that user-specified constraints are upheld.
+
+    Checks that the parsed device has a scheme that matches what the user
+    expects. Additional constraints can be added if needed.
+
+    Args:
+      device: Device object.
+      value: String representing a device target.
+
+    Raises:
+      ValueError: |device| has the wrong scheme.
+    """
+    if device.scheme not in self.schemes:
+      raise ValueError('Unsupported scheme "%s" for device "%s"' %
+                       (device.scheme, value))
+
+  def _ParseDevice(self, value):
+    """Parse a device argument.
+
+    Args:
+      value: String representing a device target.
+
+    Returns:
+      A Device object.
+
+    Raises:
+      ValueError: |value| is not a valid device specifier.
+    """
+    # ':vm:' is an alias for ssh'ing into a virtual machine on localhost;
+    # translate it appropriately.
+    if value.strip().lower() == ':vm:':
+      value = 'localhost:9222'
+    elif value.strip().lower() == 'ssh://:vm:':
+      value = 'ssh://localhost:9222'
+    parsed = urllib.parse.urlparse(value)
+
+    # crbug.com/1069325: Starting in python 3.7 urllib has different parsing
+    # results. 127.0.0.1:9999 parses as scheme='127.0.0.1' path='9999'
+    # instead of scheme='' path='127.0.0.1:9999'. We want that parsed as ssh.
+    # Check for '.' or 'localhost' in the scheme to catch the most common cases
+    # for this result.
+    if (not parsed.scheme or '.' in parsed.scheme or
+        parsed.scheme == 'localhost'):
+      # Default to a file scheme for absolute paths, SSH scheme otherwise.
+      if value and value[0] == '/':
+        scheme = DEVICE_SCHEME_FILE
+      else:
+        # urlparse won't provide hostname/username/port unless a scheme is
+        # specified so we need to re-parse.
+        parsed = urllib.parse.urlparse('%s://%s' % (DEVICE_SCHEME_SSH, value))
+        scheme = DEVICE_SCHEME_SSH
+    else:
+      scheme = parsed.scheme.lower()
+
+    if scheme == DEVICE_SCHEME_SSH:
+      hostname = parsed.hostname
+      port = parsed.port
+      if hostname == 'localhost' and not port:
+        # Use of localhost as the actual machine is uncommon enough relative to
+        # the use of KVM that we require users to specify localhost:22 if they
+        # actually want to connect to the localhost.  Otherwise the expectation
+        # is that they intend to access the VM but forget or didn't know to use
+        # port 9222.
+        raise ValueError('To connect to localhost, use ssh://localhost:22 '
+                         'explicitly, or use ssh://localhost:9222 for the local'
+                         ' VM.')
+      if not hostname:
+        raise ValueError('Hostname is required for device "%s"' % value)
+      return Device(scheme=scheme, username=parsed.username, hostname=hostname,
+                    port=port, raw=value)
+    elif scheme == DEVICE_SCHEME_USB:
+      path = parsed.netloc + parsed.path
+      # Change path '' to None for consistency.
+      return Device(scheme=scheme, path=path if path else None, raw=value)
+    elif scheme == DEVICE_SCHEME_FILE:
+      path = parsed.netloc + parsed.path
+      if not path:
+        raise ValueError('Path is required for "%s"' % value)
+      return Device(scheme=scheme, path=path, raw=value)
+    elif scheme == DEVICE_SCHEME_SERVO:
+      # Parse the identifier type and value.
+      servo_type, _, servo_id = parsed.path.partition(':')
+      # Don't want to do the netloc before the split in case of serial number.
+      servo_type = servo_type.lower()
+
+      return self._parse_servo(servo_type, servo_id)
+    else:
+      raise ValueError('Unknown device scheme "%s" in "%s"' % (scheme, value))
+
+  @staticmethod
+  def _parse_servo(servo_type, servo_id):
+    """Parse a servo device from the parsed servo uri info.
+
+    Args:
+      servo_type: The servo identifier type, either port or serial.
+      servo_id: The servo identifier, either the port number it is
+        communicating through or its serial number.
+    """
+    servo_port = None
+    serial_number = None
+    if servo_type == 'serial':
+      if servo_id:
+        serial_number = servo_id
+      else:
+        raise ValueError('No serial number given.')
+    elif servo_type == 'port':
+      if servo_id:
+        # Parse and validate when given.
+        try:
+          servo_port = int(servo_id)
+        except ValueError:
+          raise ValueError('Invalid servo port value: %s' % servo_id)
+        if servo_port <= 0 or servo_port > 65535:
+          raise ValueError(
+              'Invalid port, must be 1-65535: %d given.' % servo_port)
+    else:
+      raise ValueError('Invalid servo type given: %s' % servo_type)
+
+    return Device(
+        scheme=DEVICE_SCHEME_SERVO,
+        port=servo_port,
+        serial_number=serial_number)
+
+
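+# A minimal sketch (illustrative only, never called) of calling DeviceParser
+# directly instead of via argparse; the hostname and port are assumptions.
+def _example_device_parser():
+  """Parse an SSH device string and a servo port string into Device tuples."""
+  ssh_device = DeviceParser(DEVICE_SCHEME_SSH)('ssh://root@dut.example:22')
+  # ssh_device.hostname == 'dut.example', ssh_device.port == 22
+  servo_device = DeviceParser(DEVICE_SCHEME_SERVO)('servo:port:9999')
+  # servo_device.port == 9999, servo_device.serial_number is None
+  return ssh_device, servo_device
+
+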
+class _AppendOption(argparse.Action):
+  """Append the command line option (with no arguments) to dest.
+
+  parser.add_argument('-b', '--barg', dest='out', action='append_option')
+  options = parser.parse_args(['-b', '--barg'])
+  options.out == ['-b', '--barg']
+  """
+  def __init__(self, option_strings, dest, **kwargs):
+    if 'nargs' in kwargs:
+      raise ValueError('nargs is not supported for append_option action')
+    super(_AppendOption, self).__init__(
+        option_strings, dest, nargs=0, **kwargs)
+
+  def __call__(self, parser, namespace, values, option_string=None):
+    if getattr(namespace, self.dest, None) is None:
+      setattr(namespace, self.dest, [])
+    getattr(namespace, self.dest).append(option_string)
+
+
+class _AppendOptionValue(argparse.Action):
+  """Append the command line option to dest. Useful for pass along arguments.
+
+  parser.add_argument('-b', '--barg', dest='out', action='append_option_value')
+  options = parser.parse_args(['--barg', 'foo', '-b', 'bar'])
+  options.out == ['-barg', 'foo', '-b', 'bar']
+  """
+  def __call__(self, parser, namespace, values, option_string=None):
+    if getattr(namespace, self.dest, None) is None:
+      setattr(namespace, self.dest, [])
+    getattr(namespace, self.dest).extend([option_string, str(values)])
+
+
+class _SplitExtendAction(argparse.Action):
+  """Callback to split the argument and extend existing value.
+
+  We normalize whitespace before splitting.  This is to support the forms:
+    cbuildbot -p 'proj:branch ' ...
+    cbuildbot -p ' proj:branch' ...
+    cbuildbot -p 'proj:branch  proj2:branch' ...
+    cbuildbot -p "$(some_command_that_returns_nothing)" ...
+  """
+  def __call__(self, parser, namespace, values, option_string=None):
+    if getattr(namespace, self.dest, None) is None:
+      setattr(namespace, self.dest, [])
+    getattr(namespace, self.dest).extend(values.split())
+
+
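+# A minimal sketch (illustrative only, never called) of the split_extend
+# behavior above, using plain argparse; the option values are assumptions.
+def _example_split_extend():
+  """Show whitespace-separated values accumulating into a single list."""
+  parser = argparse.ArgumentParser()
+  parser.add_argument('-p', dest='projects', action=_SplitExtendAction)
+  opts = parser.parse_args(['-p', 'proj:branch  proj2:branch', '-p', ''])
+  # opts.projects == ['proj:branch', 'proj2:branch']
+  return opts.projects
+
+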
+VALID_TYPES = {
+    'ab_url': NormalizeAbUrl,
+    'bool': ParseBool,
+    'cipd': ValidateCipdURL,
+    'date': ParseDate,
+    'path': osutils.ExpandPath,
+    'gs_path': NormalizeGSPath,
+    'local_or_gs_path': NormalizeLocalOrGSPath,
+    'path_or_uri': NormalizeUri,
+}
+
+VALID_ACTIONS = {
+    'append_option': _AppendOption,
+    'append_option_value': _AppendOptionValue,
+    'split_extend': _SplitExtendAction,
+}
+
+_DEPRECATE_ACTIONS = [None, 'store', 'store_const', 'store_true', 'store_false',
+                      'append', 'append_const', 'count'] + list(VALID_ACTIONS)
+
+
+class _DeprecatedAction(object):
+  """Base functionality to allow adding warnings for deprecated arguments.
+
+  To add a deprecated warning, simply include a deprecated=message argument
+  to the add_argument call for the deprecated argument. Beside logging the
+  deprecation warning, the argument will behave as normal.
+  """
+
+  def __init__(self, *args, **kwargs):
+    """Init override to extract the deprecated argument when it exists."""
+    self.deprecated_message = kwargs.pop('deprecated', None)
+    super(_DeprecatedAction, self).__init__(*args, **kwargs)
+
+  def __call__(self, parser, namespace, values, option_string=None):
+    """Log the message then defer to the parent action."""
+    if self.deprecated_message:
+      logging.warning('Argument %s is deprecated: %s', option_string,
+                      self.deprecated_message)
+    return super(_DeprecatedAction, self).__call__(
+        parser, namespace, values, option_string=option_string)
+
+
+def OptparseWrapCheck(desc, check_f, _option, opt, value):
+  """Optparse adapter for type checking functionality."""
+  try:
+    return check_f(value)
+  except ValueError:
+    raise optparse.OptionValueError(
+        'Invalid %s given: --%s=%s' % (desc, opt, value))
+
+
+class Option(optparse.Option):
+  """Subclass to implement path evaluation & other useful types."""
+
+  _EXTRA_TYPES = ('path', 'gs_path')
+  TYPES = optparse.Option.TYPES + _EXTRA_TYPES
+  TYPE_CHECKER = optparse.Option.TYPE_CHECKER.copy()
+  for t in _EXTRA_TYPES:
+    TYPE_CHECKER[t] = functools.partial(OptparseWrapCheck, t, VALID_TYPES[t])
+
+
+class FilteringOption(Option):
+  """Subclass that supports Option filtering for FilteringOptionParser"""
+
+  _EXTRA_ACTIONS = ('split_extend',)
+  ACTIONS = Option.ACTIONS + _EXTRA_ACTIONS
+  STORE_ACTIONS = Option.STORE_ACTIONS + _EXTRA_ACTIONS
+  TYPED_ACTIONS = Option.TYPED_ACTIONS + _EXTRA_ACTIONS
+  ALWAYS_TYPED_ACTIONS = (Option.ALWAYS_TYPED_ACTIONS + _EXTRA_ACTIONS)
+
+  def take_action(self, action, dest, opt, value, values, parser):
+    if action == 'split_extend':
+      lvalue = value.split()
+      values.ensure_value(dest, []).extend(lvalue)
+    else:
+      Option.take_action(self, action, dest, opt, value, values, parser)
+
+    if value is None:
+      value = []
+    elif not self.nargs or self.nargs <= 1:
+      value = [value]
+
+    parser.AddParsedArg(self, opt, [str(v) for v in value])
+
+
+class ColoredFormatter(logging.Formatter):
+  """A logging formatter that can color the messages."""
+
+  _COLOR_MAPPING = {
+      'WARNING': terminal.Color.YELLOW,
+      'ERROR': terminal.Color.RED,
+  }
+
+  def __init__(self, *args, **kwargs):
+    """Initializes the formatter.
+
+    Args:
+      args: See logging.Formatter for specifics.
+      kwargs: See logging.Formatter for specifics.
+      enable_color: Whether to enable colored logging. Defaults
+        to None, where terminal.Color will set to a sane default.
+    """
+    self.color = terminal.Color(enabled=kwargs.pop('enable_color', None))
+    super(ColoredFormatter, self).__init__(*args, **kwargs)
+
+  def format(self, record):
+    """Formats |record| with color."""
+    msg = super(ColoredFormatter, self).format(record)
+    color = self._COLOR_MAPPING.get(record.levelname)
+    return msg if not color else self.color.Color(color, msg)
+
+
+class ChromiteStreamHandler(logging.StreamHandler):
+  """A stream handler for logging."""
+
+
+class BaseParser(object):
+  """Base parser class that includes the logic to add logging controls."""
+
+  DEFAULT_LOG_LEVELS = ('fatal', 'critical', 'error', 'warning', 'notice',
+                        'info', 'debug')
+
+  DEFAULT_LOG_LEVEL = 'info'
+  ALLOW_LOGGING = True
+
+  def __init__(self, **kwargs):
+    """Initialize this parser instance.
+
+    kwargs:
+      logging: Defaults to ALLOW_LOGGING from the class; if given,
+        add --log-level.
+      default_log_level: If logging is enabled, override the default logging
+        level.  Defaults to the class's DEFAULT_LOG_LEVEL value.
+      log_levels: If logging is enabled, this overrides the enumeration of
+        allowed logging levels.  If not given, defaults to the classes
+        DEFAULT_LOG_LEVELS value.
+      manual_debug: If logging is enabled and this is True, suppress addition
+        of a --debug alias.  This option defaults to True unless 'debug' has
+        been exempted from the allowed logging level targets.
+      caching: If given, must be either a callable that discerns the cache
+        location if it wasn't specified (the prototype must be akin to
+        lambda parser, values:calculated_cache_dir_path; it may return None to
+        indicate that it handles setting the value on its own later in the
+        parsing including setting the env), or True; if True, the
+        machinery defaults to invoking the class's FindCacheDir method
+        (which can be overridden).  FindCacheDir uses $CROS_CACHEDIR, falling
+        back to $REPO/.cache, and finally falling back to $TMP.
+        Note that the cache_dir is not created; only the location where it
+        should live is discerned.
+        If False, or caching is not given, then no --cache-dir option will be
+        added.
+    """
+    self.debug_enabled = False
+    self.caching_group = None
+    self.debug_group = None
+    self.default_log_level = None
+    self.log_levels = None
+    self.logging_enabled = kwargs.get('logging', self.ALLOW_LOGGING)
+    self.default_log_level = kwargs.get('default_log_level',
+                                        self.DEFAULT_LOG_LEVEL)
+    self.log_levels = tuple(x.lower() for x in
+                            kwargs.get('log_levels', self.DEFAULT_LOG_LEVELS))
+    self.debug_enabled = (not kwargs.get('manual_debug', False)
+                          and 'debug' in self.log_levels)
+    self.caching = kwargs.get('caching', False)
+    self._cros_defaults = {}
+
+  @staticmethod
+  def PopUsedArgs(kwarg_dict):
+    """Removes keys used by the base parser from the kwarg namespace."""
+    parser_keys = ['logging', 'default_log_level', 'log_levels', 'manual_debug',
+                   'caching']
+    for key in parser_keys:
+      kwarg_dict.pop(key, None)
+
+  def SetupOptions(self):
+    """Sets up standard chromite options."""
+    # NB: All options here must go through add_common_argument_to_group.
+    # You cannot use add_argument or such helpers directly.  This is to
+    # support default values with subparsers.
+    #
+    # You should also explicitly add default=None here when you want the
+    # default to be set up in the parsed option namespace.
+    if self.logging_enabled:
+      self.debug_group = self.add_argument_group('Debug options')
+      self.add_common_argument_to_group(
+          self.debug_group, '--log-level', choices=self.log_levels,
+          default=self.default_log_level,
+          help='Set logging level to report at.')
+      self.add_common_argument_to_group(
+          self.debug_group, '--log-format', action='store',
+          default=constants.LOGGER_FMT,
+          help='Set logging format to use.')
+      # Backwards compat name.  We should delete this at some point.
+      self.add_common_argument_to_group(
+          self.debug_group, '--log_format', action='store',
+          default=constants.LOGGER_FMT,
+          help=argparse.SUPPRESS)
+      self.add_common_argument_to_group(
+          self.debug_group,
+          '-v',
+          '--verbose',
+          action='store_const',
+          const='info',
+          dest='log_level',
+          help='Alias for `--log-level=info`.')
+      if self.debug_enabled:
+        self.add_common_argument_to_group(
+            self.debug_group, '--debug', action='store_const', const='debug',
+            dest='log_level', help='Alias for `--log-level=debug`. '
+            'Useful for debugging bugs/failures.')
+      self.add_common_argument_to_group(
+          self.debug_group, '--nocolor', action='store_false', dest='color',
+          default=None,
+          help='Do not use colorized output (or `export NOCOLOR=true`)')
+
+    if self.caching:
+      self.caching_group = self.add_argument_group('Caching Options')
+      self.add_common_argument_to_group(
+          self.caching_group, '--cache-dir', default=None, type='path',
+          help='Override the calculated chromeos cache directory; '
+          "typically defaults to '$REPO/.cache' .")
+
+  def SetupLogging(self, opts):
+    """Sets up logging based on |opts|."""
+    value = opts.log_level.upper()
+    logger = logging.getLogger()
+    logger.setLevel(getattr(logging, value))
+    formatter = ColoredFormatter(fmt=opts.log_format,
+                                 datefmt=constants.LOGGER_DATE_FMT,
+                                 enable_color=opts.color)
+
+    # Only set colored formatter for ChromiteStreamHandler instances,
+    # which could have been added by ScriptWrapperMain() below.
+    chromite_handlers = [x for x in logger.handlers if
+                         isinstance(x, ChromiteStreamHandler)]
+    for handler in chromite_handlers:
+      handler.setFormatter(formatter)
+
+    logging.captureWarnings(True)
+
+    return value
+
+  def DoPostParseSetup(self, opts, args):
+    """Method called to handle post opts/args setup.
+
+    This can be anything from logging setup to positional arg count validation.
+
+    Args:
+      opts: optparse.Values or argparse.Namespace instance
+      args: positional arguments unconsumed from parsing.
+
+    Returns:
+      (opts, args), with whatever modifications were made.
+    """
+    for dest, default in self._cros_defaults.items():
+      if not hasattr(opts, dest):
+        setattr(opts, dest, default)
+
+    if self.logging_enabled:
+      value = self.SetupLogging(opts)
+      if self.debug_enabled:
+        opts.debug = (value == 'DEBUG')
+      opts.verbose = value in ('INFO', 'DEBUG')
+
+    if self.caching:
+      path = os.environ.get(constants.SHARED_CACHE_ENVVAR)
+      if path is not None and opts.cache_dir is None:
+        opts.cache_dir = os.path.abspath(path)
+
+      opts.cache_dir_specified = opts.cache_dir is not None
+      if not opts.cache_dir_specified:
+        func = self.FindCacheDir if not callable(self.caching) else self.caching
+        opts.cache_dir = func(self, opts)
+      if opts.cache_dir is not None:
+        self.ConfigureCacheDir(opts.cache_dir)
+
+    return opts, args
+
+  @staticmethod
+  def ConfigureCacheDir(cache_dir):
+    """Set or clear the shared cache dir environment variable."""
+    if cache_dir is None:
+      os.environ.pop(constants.SHARED_CACHE_ENVVAR, None)
+      logging.debug('Removed cache_dir setting')
+    else:
+      os.environ[constants.SHARED_CACHE_ENVVAR] = cache_dir
+      logging.debug('Configured cache_dir to %r', cache_dir)
+
+  @classmethod
+  def FindCacheDir(cls, _parser, _opts):
+    """Return the default cache dir as computed by path_util."""
+    logging.debug('Cache dir lookup.')
+    return path_util.FindCacheDir()
+
+
[email protected]_metaclass(attrs_freezer.Class)
+class ArgumentNamespace(argparse.Namespace):
+  """Class to mimic argparse.Namespace with value freezing support."""
+  _FROZEN_ERR_MSG = 'Option values are frozen, cannot alter %s.'
+
+
+# Note that because optparse.Values is not a new-style class this class
+# must use the mixin rather than the metaclass.
+class OptionValues(attrs_freezer.Mixin, optparse.Values):
+  """Class to mimic optparse.Values with value freezing support."""
+  _FROZEN_ERR_MSG = 'Option values are frozen, cannot alter %s.'
+
+  def __init__(self, defaults, *args, **kwargs):
+    attrs_freezer.Mixin.__init__(self)
+    optparse.Values.__init__(self, defaults, *args, **kwargs)
+
+    # Used by FilteringParser.
+    self.parsed_args = None
+
+
+PassedOption = collections.namedtuple(
+    'PassedOption', ['opt_inst', 'opt_str', 'value_str'])
+
+
+class FilteringParser(optparse.OptionParser, BaseParser):
+  """Custom option parser for filtering options.
+
+  Aside from adding a couple of types (path for absolute paths,
+  gs_path for google storage urls, and log_level for logging level control),
+  this additionally exposes logging control by default; if undesired,
+  either derive from this class setting ALLOW_LOGGING to False, or
+  pass in logging=False to the constructor.
+  """
+
+  DEFAULT_OPTION_CLASS = FilteringOption
+
+  def __init__(self, usage=None, **kwargs):
+    BaseParser.__init__(self, **kwargs)
+    self.PopUsedArgs(kwargs)
+    kwargs.setdefault('option_class', self.DEFAULT_OPTION_CLASS)
+    optparse.OptionParser.__init__(self, usage=usage, **kwargs)
+    self.SetupOptions()
+
+  def add_common_argument_to_group(self, group, *args, **kwargs):
+    """Adds the given option defined by args and kwargs to group."""
+    return group.add_option(*args, **kwargs)
+
+  def add_argument_group(self, *args, **kwargs):
+    """Return an option group rather than an argument group."""
+    return self.add_option_group(*args, **kwargs)
+
+  def parse_args(self, args=None, values=None):
+    # If no Values object is specified then use our custom OptionValues.
+    if values is None:
+      values = OptionValues(defaults=self.defaults)
+
+    values.parsed_args = []
+
+    opts, remaining = optparse.OptionParser.parse_args(
+        self, args=args, values=values)
+    return self.DoPostParseSetup(opts, remaining)
+
+  def AddParsedArg(self, opt_inst, opt_str, value_str):
+    """Add a parsed argument with attributes.
+
+    Args:
+      opt_inst: An instance of a raw optparse.Option object that represents the
+                option.
+      opt_str: The option string.
+      value_str: A list of stringified values identified by optparse.
+    """
+    self.values.parsed_args.append(PassedOption(opt_inst, opt_str, value_str))
+
+  @staticmethod
+  def FilterArgs(parsed_args, filter_fn):
+    """Filter the argument by passing it through a function.
+
+    Args:
+      parsed_args: The list of parsed argument namedtuples to filter.  Tuples
+        are of the form (opt_inst, opt_str, value_str).
+      filter_fn: A function with signature f(PassedOption) that returns True
+        if the argument is to be passed through, False if not.
+
+    Returns:
+      A tuple containing two lists - one of accepted arguments and one of
+      removed arguments.
+    """
+    removed = []
+    accepted = []
+    for arg in parsed_args:
+      target = accepted if filter_fn(arg) else removed
+      target.append(arg.opt_str)
+      target.extend(arg.value_str)
+
+    return accepted, removed
+
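+# Illustrative example (not part of the original chromite module):
+# FilterArgs() splits a parsed command line back into two argv fragments;
+# opts.parsed_args is populated by FilteringParser.parse_args() above.
+#
+#   keep = lambda arg: arg.opt_str in ('--log-level', '--debug')
+#   accepted, removed = FilteringParser.FilterArgs(opts.parsed_args, keep)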
+
+class ArgumentParser(BaseParser, argparse.ArgumentParser):
+  """Custom argument parser for use by chromite.
+
+  This class additionally exposes logging control by default; if undesired,
+  either derive from this class setting ALLOW_LOGGING to False, or
+  pass in logging=False to the constructor.
+  """
+
+  def __init__(self, usage=None, **kwargs):
+    kwargs.setdefault('formatter_class', argparse.RawDescriptionHelpFormatter)
+    BaseParser.__init__(self, **kwargs)
+    self.PopUsedArgs(kwargs)
+    argparse.ArgumentParser.__init__(self, usage=usage, **kwargs)
+    self._SetupTypes()
+    self.SetupOptions()
+    self._RegisterActions()
+
+  def _SetupTypes(self):
+    """Register types with ArgumentParser."""
+    for t, check_f in VALID_TYPES.items():
+      self.register('type', t, check_f)
+    for a, class_a in VALID_ACTIONS.items():
+      self.register('action', a, class_a)
+
+  def _RegisterActions(self):
+    """Update the container's actions.
+
+    This method builds out a new action class to register for each action type.
+    The new action class allows handling the deprecated argument without any
+    other changes to the argument parser logic. See _DeprecatedAction.
+    """
+    for action in _DEPRECATE_ACTIONS:
+      current_class = self._registry_get('action', action, object)
+      # Base classes for the new class. The _DeprecatedAction must be first to
+      # ensure its method overrides are called first.
+      bases = (_DeprecatedAction, current_class)
+      try:
+        self.register('action', action, type('deprecated-wrapper', bases, {}))
+      except TypeError:
+        # Method resolution order error. This occurs when the _DeprecatedAction
+        # class is inherited multiple times, so we've already registered the
+        # replacement class. The underlying _ActionsContainer gets passed
+        # around, so this may get triggered in non-obvious ways.
+        continue
+
+  def add_common_argument_to_group(self, group, *args, **kwargs):
+    """Adds the given argument to the group.
+
+    This argument is expected to show up across the base parser and subparsers
+    that might be added later on.  The default argparse module does not handle
+    this scenario well -- it processes the base parser first (defaults and the
+    user arguments), then it processes the subparser (defaults and arguments).
+    That means defaults in the subparser will clobber user arguments passed in
+    to the base parser!
+    """
+    default = kwargs.pop('default', None)
+    kwargs['default'] = argparse.SUPPRESS
+    action = group.add_argument(*args, **kwargs)
+    self._cros_defaults.setdefault(action.dest, default)
+    return action
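+
+  # Illustrative example (not part of the original chromite module): because
+  # arguments added here parse with default=argparse.SUPPRESS, a subparser's
+  # defaults cannot clobber values the user passed to the base parser; the
+  # real default is filled in later by DoPostParseSetup().  The flag below is
+  # made up.
+  #
+  #   parser = ArgumentParser(description='example')
+  #   group = parser.add_argument_group('Example options')
+  #   parser.add_common_argument_to_group(group, '--board', default='eve')
+  #   parser.parse_args([]).board                    # -> 'eve'
+  #   parser.parse_args(['--board', 'kevin']).board  # -> 'kevin'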
+
+  def parse_args(self, args=None, namespace=None):
+    """Translates OptionParser call to equivalent ArgumentParser call."""
+    # If no Namespace object is specified then use our custom ArgumentNamespace.
+    if namespace is None:
+      namespace = ArgumentNamespace()
+
+    # Unlike OptionParser, ArgParser works only with a single namespace and no
+    # args. Re-use BaseParser DoPostParseSetup but only take the namespace.
+    namespace = argparse.ArgumentParser.parse_args(
+        self, args=args, namespace=namespace)
+    return self.DoPostParseSetup(namespace, None)[0]
+
+
+class _ShutDownException(SystemExit):
+  """Exception raised when user hits CTRL+C."""
+
+  def __init__(self, sig_num, message):
+    self.signal = sig_num
+    # Setup a usage message primarily for any code that may intercept it
+    # while this exception is crashing back up the stack to us.
+    SystemExit.__init__(self, 128 + sig_num)
+    self.args = (sig_num, message)
+
+  def __str__(self):
+    """Stringify this exception."""
+    return self.args[1]
+
+
+def _DefaultHandler(signum, _frame):
+  # Don't double process sigterms; just trigger shutdown from the first
+  # exception.
+  signal.signal(signum, signal.SIG_IGN)
+  raise _ShutDownException(
+      signum, 'Received signal %i; shutting down' % (signum,))
+
+
+def _RestartInChroot(cmd, chroot_args, extra_env):
+  """Rerun inside the chroot.
+
+  Args:
+    cmd: Command line to run inside the chroot as a list of strings.
+    chroot_args: Arguments to pass directly to cros_sdk (or None).
+    extra_env: Dictionary of environmental variables to set inside the
+        chroot (or None).
+  """
+  return cros_build_lib.run(cmd, check=False, enter_chroot=True,
+                            chroot_args=chroot_args, extra_env=extra_env,
+                            cwd=constants.SOURCE_ROOT).returncode
+
+
+def RunInsideChroot(command=None, chroot_args=None):
+  """Restart the current command inside the chroot.
+
+  This method is only valid for any code that is run via ScriptWrapperMain.
+  It allows proper cleanup of the local context by raising an exception handled
+  in ScriptWrapperMain.
+
+  Args:
+    command: An instance of CliCommand to be restarted inside the chroot.
+             |command| can be None if you do not wish to modify the log_level.
+    chroot_args: List of command-line arguments to pass to cros_sdk, if invoked.
+  """
+  if cros_build_lib.IsInsideChroot():
+    return
+
+  # Produce the command line to execute inside the chroot.
+  argv = sys.argv[:]
+  argv[0] = path_util.ToChrootPath(argv[0])
+
+  # Set log-level of cros_sdk to be same as log-level of command entering the
+  # chroot.
+  if chroot_args is None:
+    chroot_args = []
+  if command is not None:
+    chroot_args += ['--log-level', command.options.log_level]
+
+  raise ChrootRequiredError(argv, chroot_args)
+
+
+def ReExec():
+  """Restart the current command.
+
+  This method is only valid for any code that is run via ScriptWrapperMain.
+  It allows proper cleanup of the local context by raising an exception handled
+  in ScriptWrapperMain.
+  """
+  # The command to exec.
+  raise ExecRequiredError(sys.argv[:])
+
+
+def ScriptWrapperMain(find_target_func, argv=None,
+                      log_level=logging.DEBUG,
+                      log_format=constants.LOGGER_FMT):
+  """Function usable for chromite.script.* style wrapping.
+
+  Note that this function invokes sys.exit on the way out by default.
+
+  Args:
+    find_target_func: a function, which, when given the absolute
+      pathway the script was invoked via (for example,
+      /home/ferringb/cros/trunk/chromite/bin/cros_sdk; note that any
+      trailing .py from the path name will be removed),
+      will return the main function to invoke (that functor will take
+      a single arg- a list of arguments, and shall return either None
+      or an integer, to indicate the exit code).
+    argv: sys.argv, or an equivalent tuple for testing.  If nothing is
+      given, sys.argv is defaulted to.
+    log_level: Default logging level to start at.
+    log_format: Default logging format to use.
+  """
+  if argv is None:
+    argv = sys.argv[:]
+  target = os.path.abspath(argv[0])
+  name = os.path.basename(target)
+  if target.endswith('.py'):
+    target = os.path.splitext(target)[0]
+  target = find_target_func(target)
+  if target is None:
+    print('Internal error detected- no main functor found in module %r.' %
+          (name,), file=sys.stderr)
+    sys.exit(100)
+
+  # Set up basic logging information for all modules that use logging.
+  # Note a script target may setup default logging in its module namespace
+  # which will take precedence over this.
+  logger = logging.getLogger()
+  logger.setLevel(log_level)
+  logger_handler = ChromiteStreamHandler()
+  logger_handler.setFormatter(
+      logging.Formatter(fmt=log_format, datefmt=constants.LOGGER_DATE_FMT))
+  logger.addHandler(logger_handler)
+  logging.captureWarnings(True)
+
+  signal.signal(signal.SIGTERM, _DefaultHandler)
+
+  ret = 1
+  try:
+    ret = target(argv[1:])
+  except _ShutDownException as e:
+    sys.stdout.flush()
+    print('%s: Signaled to shutdown: caught %i signal.' % (name, e.signal),
+          file=sys.stderr)
+    sys.stderr.flush()
+  except SystemExit as e:
+    # Right now, let this crash through- longer term, we'll update the scripts
+    # in question to not use sys.exit, and make this into a flagged error.
+    raise
+  except ChrootRequiredError as e:
+    ret = _RestartInChroot(e.cmd, e.chroot_args, e.extra_env)
+  except ExecRequiredError as e:
+    logging.shutdown()
+    # This does not return.
+    os.execv(e.cmd[0], e.cmd)
+  except Exception as e:
+    sys.stdout.flush()
+    print('%s: Unhandled exception:' % (name,), file=sys.stderr)
+    sys.stderr.flush()
+    raise
+  finally:
+    logging.shutdown()
+
+  if ret is None:
+    ret = 0
+  sys.exit(ret)
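+
+
+# Illustrative sketch (not part of the original chromite module): a
+# chromite.scripts-style wrapper hands a resolver to ScriptWrapperMain(); the
+# module path below is hypothetical.
+#
+#   import importlib
+#
+#   def FindTarget(target):
+#     """Return the main() of the script module named by |target|."""
+#     module = importlib.import_module(
+#         'chromite.scripts.%s' % os.path.basename(target))
+#     return getattr(module, 'main', None)
+#
+#   ScriptWrapperMain(FindTarget)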
diff --git a/utils/frozen_chromite/lib/config_lib.py b/utils/frozen_chromite/lib/config_lib.py
new file mode 100644
index 0000000..526b505
--- /dev/null
+++ b/utils/frozen_chromite/lib/config_lib.py
@@ -0,0 +1,2090 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Configuration options for various cbuildbot builders."""
+
+from __future__ import print_function
+
+import copy
+import itertools
+import json
+import numbers
+import os
+import re
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.utils import memoize
+
+GS_PATH_DEFAULT = 'default'  # Means gs://chromeos-image-archive/ + bot_id
+
+# Contains the valid build config suffixes.
+CONFIG_TYPE_RELEASE = 'release'
+CONFIG_TYPE_FULL = 'full'
+CONFIG_TYPE_FIRMWARE = 'firmware'
+CONFIG_TYPE_FACTORY = 'factory'
+CONFIG_TYPE_TOOLCHAIN = 'toolchain'
+
+# DISPLAY labels are used to group related builds together in the GE UI.
+
+DISPLAY_LABEL_TRYJOB = 'tryjob'
+DISPLAY_LABEL_INCREMENATAL = 'incremental'
+DISPLAY_LABEL_FULL = 'full'
+DISPLAY_LABEL_CHROME_INFORMATIONAL = 'chrome_informational'
+DISPLAY_LABEL_INFORMATIONAL = 'informational'
+DISPLAY_LABEL_RELEASE = 'release'
+DISPLAY_LABEL_CHROME_PFQ = 'chrome_pfq'
+DISPLAY_LABEL_MST_ANDROID_PFQ = 'mst_android_pfq'
+DISPLAY_LABEL_VMMST_ANDROID_PFQ = 'vmmst_android_pfq'
+DISPLAY_LABEL_PI_ANDROID_PFQ = 'pi_android_pfq'
+DISPLAY_LABEL_QT_ANDROID_PFQ = 'qt_android_pfq'
+DISPLAY_LABEL_RVC_ANDROID_PFQ = 'rvc_android_pfq'
+DISPLAY_LABEL_VMRVC_ANDROID_PFQ = 'vmrvc_android_pfq'
+DISPLAY_LABEL_FIRMWARE = 'firmware'
+DISPLAY_LABEL_FACTORY = 'factory'
+DISPLAY_LABEL_TOOLCHAIN = 'toolchain'
+DISPLAY_LABEL_UTILITY = 'utility'
+DISPLAY_LABEL_PRODUCTION_TRYJOB = 'production_tryjob'
+
+# This list of constants should be kept in sync with GoldenEye code.
+ALL_DISPLAY_LABEL = {
+    DISPLAY_LABEL_TRYJOB,
+    DISPLAY_LABEL_INCREMENATAL,
+    DISPLAY_LABEL_FULL,
+    DISPLAY_LABEL_CHROME_INFORMATIONAL,
+    DISPLAY_LABEL_INFORMATIONAL,
+    DISPLAY_LABEL_RELEASE,
+    DISPLAY_LABEL_CHROME_PFQ,
+    DISPLAY_LABEL_MST_ANDROID_PFQ,
+    DISPLAY_LABEL_VMMST_ANDROID_PFQ,
+    DISPLAY_LABEL_PI_ANDROID_PFQ,
+    DISPLAY_LABEL_QT_ANDROID_PFQ,
+    DISPLAY_LABEL_RVC_ANDROID_PFQ,
+    DISPLAY_LABEL_VMRVC_ANDROID_PFQ,
+    DISPLAY_LABEL_FIRMWARE,
+    DISPLAY_LABEL_FACTORY,
+    DISPLAY_LABEL_TOOLCHAIN,
+    DISPLAY_LABEL_UTILITY,
+    DISPLAY_LABEL_PRODUCTION_TRYJOB,
+}
+
+# These values must be kept in sync with the ChromeOS LUCI builders.
+#
+# https://chrome-internal.googlesource.com/chromeos/
+#     infra/config/+/refs/heads/master/luci/cr-buildbucket.cfg
+LUCI_BUILDER_FACTORY = 'Factory'
+LUCI_BUILDER_FULL = 'Full'
+LUCI_BUILDER_INCREMENTAL = 'Incremental'
+LUCI_BUILDER_INFORMATIONAL = 'Informational'
+LUCI_BUILDER_INFRA = 'Infra'
+LUCI_BUILDER_LEGACY_RELEASE = 'LegacyRelease'
+LUCI_BUILDER_PFQ = 'PFQ'
+LUCI_BUILDER_RAPID = 'Rapid'
+LUCI_BUILDER_RELEASE = 'Release'
+LUCI_BUILDER_STAGING = 'Staging'
+LUCI_BUILDER_TRY = 'Try'
+
+ALL_LUCI_BUILDER = {
+    LUCI_BUILDER_FACTORY,
+    LUCI_BUILDER_FULL,
+    LUCI_BUILDER_INCREMENTAL,
+    LUCI_BUILDER_INFORMATIONAL,
+    LUCI_BUILDER_INFRA,
+    LUCI_BUILDER_LEGACY_RELEASE,
+    LUCI_BUILDER_PFQ,
+    LUCI_BUILDER_RAPID,
+    LUCI_BUILDER_RELEASE,
+    LUCI_BUILDER_STAGING,
+    LUCI_BUILDER_TRY,
+}
+
+
+def isTryjobConfig(build_config):
+  """Is a given build config a tryjob config, or a production config?
+
+  Args:
+    build_config: A fully populated instance of BuildConfig.
+
+  Returns:
+    Boolean. True if it's a tryjob config.
+  """
+  return build_config.luci_builder in [LUCI_BUILDER_RAPID, LUCI_BUILDER_TRY]
+
+# In the Json, this special build config holds the default values for all
+# other configs.
+DEFAULT_BUILD_CONFIG = '_default'
+
+# Constants for config template file
+CONFIG_TEMPLATE_BOARDS = 'boards'
+CONFIG_TEMPLATE_NAME = 'name'
+CONFIG_TEMPLATE_EXPERIMENTAL = 'experimental'
+CONFIG_TEMPLATE_LEADER_BOARD = 'leader_board'
+CONFIG_TEMPLATE_BOARD_GROUP = 'board_group'
+CONFIG_TEMPLATE_BUILDER = 'builder'
+CONFIG_TEMPLATE_RELEASE = 'RELEASE'
+CONFIG_TEMPLATE_CONFIGS = 'configs'
+CONFIG_TEMPLATE_ARCH = 'arch'
+CONFIG_TEMPLATE_RELEASE_BRANCH = 'release_branch'
+CONFIG_TEMPLATE_REFERENCE_BOARD_NAME = 'reference_board_name'
+CONFIG_TEMPLATE_MODELS = 'models'
+CONFIG_TEMPLATE_MODEL_NAME = 'name'
+CONFIG_TEMPLATE_MODEL_BOARD_NAME = 'board_name'
+CONFIG_TEMPLATE_MODEL_TEST_SUITES = 'test_suites'
+CONFIG_TEMPLATE_MODEL_CQ_TEST_ENABLED = 'cq_test_enabled'
+
+CONFIG_X86_INTERNAL = 'X86_INTERNAL'
+CONFIG_X86_EXTERNAL = 'X86_EXTERNAL'
+CONFIG_ARM_INTERNAL = 'ARM_INTERNAL'
+CONFIG_ARM_EXTERNAL = 'ARM_EXTERNAL'
+
+
+def IsCanaryMaster(builder_run):
+  """Returns True if this build type is master-release"""
+  return (builder_run.config.build_type == constants.CANARY_TYPE and
+          builder_run.config.master and
+          builder_run.manifest_branch == 'master')
+
+def IsPFQType(b_type):
+  """Returns True if this build type is a PFQ."""
+  return b_type in (constants.PFQ_TYPE, constants.ANDROID_PFQ_TYPE)
+
+
+def IsCanaryType(b_type):
+  """Returns True if this build type is a Canary."""
+  return b_type == constants.CANARY_TYPE
+
+
+def IsMasterAndroidPFQ(config):
+  """Returns True if this build is master Android PFQ type."""
+  return config.build_type == constants.ANDROID_PFQ_TYPE and config.master
+
+
+def GetHWTestEnv(builder_run_config, model_config=None, suite_config=None):
+  """Return the env of a suite to run for a given build/model.
+
+  Args:
+    builder_run_config: The BuildConfig object inside a BuilderRun object.
+    model_config: A ModelTestConfig object to test against.
+    suite_config: A HWTestConfig object to test against.
+
+  Returns:
+    A string variable to indicate the hwtest environment.
+  """
+  enable_suite = True if suite_config is None else suite_config.enable_skylab
+  enable_model = True if model_config is None else model_config.enable_skylab
+  if (builder_run_config.enable_skylab_hw_tests and enable_suite and
+      enable_model):
+    return constants.ENV_SKYLAB
+
+  return constants.ENV_AUTOTEST
+
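+# Illustrative example (not part of the original chromite module): the suite
+# runs in Skylab only when the build config, model config, and suite config
+# all allow it; any one of them can force the environment back to Autotest.
+#
+#   cfg = AttrDict(enable_skylab_hw_tests=True)
+#   model = ModelTestConfig('eve', 'eve', enable_skylab=False)
+#   GetHWTestEnv(cfg, model_config=model)  # -> constants.ENV_AUTOTEST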
+
+class AttrDict(dict):
+  """Dictionary with 'attribute' access.
+
+  This is identical to a dictionary, except that string keys can be addressed as
+  read-only attributes.
+  """
+
+  def __getattr__(self, name):
+    """Support attribute-like access to each dict entry."""
+    if name in self:
+      return self[name]
+
+    # Super class (dict) has no __getattr__ method, so use __getattribute__.
+    return super(AttrDict, self).__getattribute__(name)
+
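+# Illustrative example (not part of the original chromite module): entries of
+# an AttrDict can be read either way.
+#
+#   d = AttrDict(boards=['eve'], master=True)
+#   assert d.boards == d['boards']
+#   assert d.master is True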
+
+class BuildConfig(AttrDict):
+  """Dictionary of explicit configuration settings for a cbuildbot config
+
+  Each dictionary entry is in turn a dictionary of config_param->value.
+
+  See DefaultSettings for details on known configurations, and their
+  documentation.
+  """
+
+  def deepcopy(self):
+    """Create a deep copy of this object.
+
+    This is a specialized version of copy.deepcopy() for BuildConfig objects. It
+    speeds up deep copies by 10x because we know in advance what is stored
+    inside a BuildConfig object and don't have to do as much introspection. This
+    function is called a lot during setup of the config objects so optimizing it
+    makes a big difference. (It saves seconds off the load time of this module!)
+    """
+    result = BuildConfig(self)
+
+    # Here is where we handle all values that need deepcopy instead of shallow.
+    for k, v in result.items():
+      if v is not None:
+        if k == 'child_configs':
+          result[k] = [x.deepcopy() for x in v]
+        elif k in ('vm_tests', 'vm_tests_override', 'hw_tests',
+                   'hw_tests_override', 'tast_vm_tests'):
+          result[k] = [copy.copy(x) for x in v]
+        # type(v) is faster than isinstance.
+        elif type(v) is list:  # pylint: disable=unidiomatic-typecheck
+          result[k] = v[:]
+
+    return result
+
+  def apply(self, *args, **kwargs):
+    """Apply changes to this BuildConfig.
+
+    Note: If an override is callable, it will be called and passed the prior
+    value for the given key (or None) to compute the new value.
+
+    Args:
+      args: Dictionaries or templates to update this config with.
+      kwargs: Settings to inject; see DefaultSettings for valid values.
+
+    Returns:
+      self after changes are applied.
+    """
+    inherits = list(args)
+    inherits.append(kwargs)
+
+    for update_config in inherits:
+      for name, value in update_config.items():
+        if callable(value):
+          # If we are applying to a fixed value, we resolve to a fixed value.
+          # Otherwise, we save off a callable to apply later, perhaps with
+          # nested callables (IE: we curry them). This allows us to use
+          # callables in templates, and apply templates to each other and still
+          # get the expected result when we use them later on.
+          #
+          # Delaying the resolution of callables is safe, because "Add()" always
+          # applies against the default, which has fixed values for everything.
+
+          if name in self:
+            # apply it to the current value.
+            if callable(self[name]):
+              # If we have no fixed value to resolve with, stack the callables.
+              def stack(new_callable, old_callable):
+                """Helper method to isolate namespace for closure."""
+                return lambda fixed: new_callable(old_callable(fixed))
+
+              self[name] = stack(value, self[name])
+            else:
+              # If the current value was a fixed value, apply the callable.
+              self[name] = value(self[name])
+          else:
+            # If we had no value to apply it to, save it for later.
+            self[name] = value
+
+        elif name == '_template':
+          # We never apply _template. You have to set it through Add.
+          pass
+
+        else:
+          # Simple values overwrite whatever we do or don't have.
+          self[name] = value
+
+    return self
+
+  def derive(self, *args, **kwargs):
+    """Create a new config derived from this one.
+
+    Note: If an override is callable, it will be called and passed the prior
+    value for the given key (or None) to compute the new value.
+
+    Args:
+      args: Mapping instances to mixin.
+      kwargs: Settings to inject; see DefaultSettings for valid values.
+
+    Returns:
+      A new _config instance.
+    """
+    return self.deepcopy().apply(*args, **kwargs)
+
+  def AddSlave(self, slave):
+    """Assign slave config(s) to a build master.
+
+    A helper for adding slave configs to a master config.
+    """
+    assert self.master
+    if self['slave_configs'] is None:
+      self['slave_configs'] = []
+    self.slave_configs.append(slave.name)
+    self.slave_configs.sort()
+
+  def AddSlaves(self, slaves):
+    """Assign slave config(s) to a build master.
+
+    A helper for adding slave configs to a master config.
+    """
+    assert self.master
+    if self['slave_configs'] is None:
+      self['slave_configs'] = []
+    self.slave_configs.extend(slave_config.name for slave_config in slaves)
+    self.slave_configs.sort()
+
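+# Illustrative sketch (not part of the original chromite module):
+# BuildConfig.derive() deep-copies and then applies overrides; a callable
+# override receives the prior value, which lets templates append rather than
+# replace.
+#
+#   base = BuildConfig(DefaultSettings(), name='example-full')
+#   derived = base.derive(
+#       boards=['eve'],
+#       useflags=lambda old: (old or []) + ['chrome_internal'])
+#   # derived.useflags == ['chrome_internal']; |base| is unchanged.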
+
+class VMTestConfig(object):
+  """Config object for virtual machine tests suites.
+
+  Attributes:
+    test_type: Test type to be run.
+    test_suite: Test suite to be run in VMTest.
+    timeout: Number of seconds to wait before timing out waiting for
+             results.
+    retry: Whether we should retry tests that fail in a suite run.
+    max_retries: Integer, maximum job retries allowed at suite level.
+                 None for no max.
+    warn_only: Boolean, failure on VM tests warns only.
+    use_ctest: Use the old ctest code path rather than the new chromite one.
+  """
+  DEFAULT_TEST_TIMEOUT = 90 * 60
+
+  def __init__(self,
+               test_type,
+               test_suite=None,
+               timeout=DEFAULT_TEST_TIMEOUT,
+               retry=False,
+               max_retries=constants.VM_TEST_MAX_RETRIES,
+               warn_only=False,
+               use_ctest=True):
+    """Constructor -- see members above."""
+    self.test_type = test_type
+    self.test_suite = test_suite
+    self.timeout = timeout
+    self.retry = retry
+    self.max_retries = max_retries
+    self.warn_only = warn_only
+    self.use_ctest = use_ctest
+
+  def __eq__(self, other):
+    return self.__dict__ == other.__dict__
+
+
+class GCETestConfig(object):
+  """Config object for GCE tests suites.
+
+  Attributes:
+    test_type: Test type to be run.
+    test_suite: Test suite to be run in GCETest.
+    timeout: Number of seconds to wait before timing out waiting for
+             results.
+    use_ctest: Use the old ctest code path rather than the new chromite one.
+  """
+  DEFAULT_TEST_TIMEOUT = 60 * 60
+
+  def __init__(self,
+               test_type,
+               test_suite=None,
+               timeout=DEFAULT_TEST_TIMEOUT,
+               use_ctest=True):
+    """Constructor -- see members above."""
+    self.test_type = test_type
+    self.test_suite = test_suite
+    self.timeout = timeout
+    self.use_ctest = use_ctest
+
+  def __eq__(self, other):
+    return self.__dict__ == other.__dict__
+
+
+class TastVMTestConfig(object):
+  """Config object for a Tast virtual-machine-based test suite.
+
+  Attributes:
+    name: String containing short human-readable name describing test suite.
+    test_exprs: List of string expressions describing which tests to run; this
+                is passed directly to the 'tast run' command. See
+                https://goo.gl/UPNEgT for info about test expressions.
+    timeout: Number of seconds to wait before timing out waiting for
+             results.
+  """
+  DEFAULT_TEST_TIMEOUT = 60 * 60
+
+  def __init__(self, suite_name, test_exprs, timeout=DEFAULT_TEST_TIMEOUT):
+    """Constructor -- see members above."""
+    # Passing a single string is an easy mistake to make and results in
+    # confusing errors later when a list of one-character strings gets passed
+    # to the tast command.
+    if not isinstance(test_exprs, list):
+      raise TypeError('test_exprs must be list of strings')
+    self.suite_name = suite_name
+    self.test_exprs = test_exprs
+    self.timeout = timeout
+
+  def __eq__(self, other):
+    return self.__dict__ == other.__dict__
+
+
+class MoblabVMTestConfig(object):
+  """Config object for moblab tests suites.
+
+  Attributes:
+    test_type: Test type to be run.
+    timeout: Number of seconds to wait before timing out waiting for
+             results.
+  """
+  DEFAULT_TEST_TIMEOUT = 60 * 60
+
+  def __init__(self, test_type, timeout=DEFAULT_TEST_TIMEOUT):
+    """Constructor -- see members above."""
+    self.test_type = test_type
+    self.timeout = timeout
+
+  def __eq__(self, other):
+    return self.__dict__ == other.__dict__
+
+
+class ModelTestConfig(object):
+  """Model specific config that controls which test suites are executed.
+
+  Attributes:
+    name: The name of the model that will be tested (matches model label)
+    lab_board_name: The name of the board in the lab (matches board label)
+    test_suites: List of hardware test suites that will be executed.
+    enable_skylab: Whether suites for this model may run in the Skylab
+                   environment (see GetHWTestEnv).
+  """
+
+  def __init__(self, name, lab_board_name, test_suites=None,
+               enable_skylab=True):
+    """Constructor -- see members above."""
+    self.name = name
+    self.lab_board_name = lab_board_name
+    self.test_suites = test_suites
+    self.enable_skylab = enable_skylab
+
+  def __eq__(self, other):
+    return self.__dict__ == other.__dict__
+
+
+class HWTestConfig(object):
+  """Config object for hardware tests suites.
+
+  Attributes:
+    suite: Name of the test suite to run.
+    timeout: Number of seconds to wait before timing out waiting for
+             results.
+    pool: Pool to use for hw testing.
+    blocking: Setting this to true requires that this suite must PASS for suites
+              scheduled after it to run. This also means any suites that are
+              scheduled before a blocking one effectively block the suites
+              scheduled after it. Use this when you want some suites to gate
+              whether or not others run, e.g. only run longer-running
+              suites if some core ones pass first.
+
+              Note, if you want multiple suites to block other suites but run
+              in parallel, you should only mark the last one scheduled as
+              blocking (it effectively serves as a thread/process join).
+    async: Fire-and-forget suite.
+    warn_only: Failure on HW tests warns only (does not generate error).
+    critical: Usually we consider structural failures here as OK.
+    priority:  Priority at which tests in the suite will be scheduled in
+               the hw lab.
+    file_bugs: Should we file bugs if a test fails in a suite run.
+    minimum_duts: minimum number of DUTs required for testing in the hw lab.
+    retry: Whether we should retry tests that fail in a suite run.
+    max_retries: Integer, maximum job retries allowed at suite level.
+                 None for no max.
+    suite_min_duts: Preferred minimum duts. Lab will prioritize on getting such
+                    number of duts even if the suite is competing with
+                    other suites that have higher priority.
+    suite_args: Arguments passed to the suite.  This should be a dict
+                representing keyword arguments.  The value is marshalled
+                using repr(), so the dict values should be basic types.
+    quota_account: The quotascheduler account to use for all tests in this
+                   suite.
+
+  Some combinations of member settings are invalid:
+    * A suite config may not specify both blocking and async.
+    * A suite config may not specify both warn_only and critical.
+  """
+  _MINUTE = 60
+  _HOUR = 60 * _MINUTE
+  _DAY = 24 * _HOUR
+  # CTS timeout ~ 2 * expected runtime in case other tests are using the CTS
+  # pool.
+  # Must not exceed the buildbucket build timeout set at
+  # https://chrome-internal.googlesource.com/chromeos/infra/config/+/8f12edac54383831aaed9ed1819ef909a66ecc97/testplatform/main.star#90
+  CTS_QUAL_HW_TEST_TIMEOUT = int(1 * _DAY + 18 * _HOUR)
+  # GTS runs faster than CTS. But to avoid starving GTS by CTS we set both
+  # timeouts equal.
+  GTS_QUAL_HW_TEST_TIMEOUT = CTS_QUAL_HW_TEST_TIMEOUT
+  SHARED_HW_TEST_TIMEOUT = int(3.0 * _HOUR)
+  PALADIN_HW_TEST_TIMEOUT = int(2.0 * _HOUR)
+  BRANCHED_HW_TEST_TIMEOUT = int(10.0 * _HOUR)
+
+  # TODO(jrbarnette) Async HW test phases complete within seconds.
+  # however, the tests they start can require hours to complete.
+  # Chromite code doesn't distinguish "timeout for Autotest" from
+  # timeout in the builder.  This is WRONG WRONG WRONG.  But, until
+  # there's a better fix, we'll allow these phases hours to fail.
+  ASYNC_HW_TEST_TIMEOUT = int(250.0 * _MINUTE)
+
+  def __init__(self,
+               suite,
+               pool=constants.HWTEST_QUOTA_POOL,
+               timeout=SHARED_HW_TEST_TIMEOUT,
+               warn_only=False,
+               critical=False,
+               blocking=False,
+               file_bugs=False,
+               priority=constants.HWTEST_BUILD_PRIORITY,
+               retry=True,
+               max_retries=constants.HWTEST_MAX_RETRIES,
+               minimum_duts=0,
+               suite_min_duts=0,
+               suite_args=None,
+               offload_failures_only=False,
+               enable_skylab=True,
+               quota_account=constants.HWTEST_QUOTA_ACCOUNT_BVT,
+               **kwargs):
+    """Constructor -- see members above."""
+    # Python 3.7+ made async a reserved keyword.
+    asynchronous = kwargs.pop('async', False)
+    setattr(self, 'async', asynchronous)
+    assert not kwargs, 'Excess kwargs found: %s' % (kwargs,)
+
+    assert not asynchronous or not blocking, '%s is async and blocking' % suite
+    assert not warn_only or not critical
+    self.suite = suite
+    self.pool = pool
+    self.timeout = timeout
+    self.blocking = blocking
+    self.warn_only = warn_only
+    self.critical = critical
+    self.file_bugs = file_bugs
+    self.priority = priority
+    self.retry = retry
+    self.max_retries = max_retries
+    self.minimum_duts = minimum_duts
+    self.suite_min_duts = suite_min_duts
+    self.suite_args = suite_args
+    self.offload_failures_only = offload_failures_only
+    # Usually whether to run in skylab is controlled by 'enable_skylab_hw_test'
+    # in build config. But for some particular suites, we want to exclude them
+    # from Skylab even if the build config is migrated to Skylab.
+    self.enable_skylab = enable_skylab
+    self.quota_account = quota_account
+
+  def _SetCommonBranchedValues(self):
+    """Set the common values for branched builds."""
+    self.timeout = max(HWTestConfig.BRANCHED_HW_TEST_TIMEOUT, self.timeout)
+
+    # Set minimum_duts default to 0, which means that lab will not check the
+    # number of available duts to meet the minimum requirement before creating
+    # a suite job for branched build.
+    self.minimum_duts = 0
+
+  def SetBranchedValuesForSkylab(self):
+    """Set suite values for branched builds for skylab."""
+    self._SetCommonBranchedValues()
+
+    if (constants.SKYLAB_HWTEST_PRIORITIES_MAP[self.priority] < constants
+        .SKYLAB_HWTEST_PRIORITIES_MAP[constants.HWTEST_DEFAULT_PRIORITY]):
+      self.priority = constants.HWTEST_DEFAULT_PRIORITY
+
+  def SetBranchedValues(self):
+    """Changes the HW Test timeout/priority values to branched values."""
+    self._SetCommonBranchedValues()
+
+    # Only reduce priority if it's lower.
+    new_priority = constants.HWTEST_PRIORITIES_MAP[constants
+                                                   .HWTEST_DEFAULT_PRIORITY]
+    if isinstance(self.priority, numbers.Integral):
+      self.priority = min(self.priority, new_priority)
+    elif constants.HWTEST_PRIORITIES_MAP[self.priority] > new_priority:
+      self.priority = new_priority
+
+  @property
+  def timeout_mins(self):
+    return self.timeout // 60
+
+  def __eq__(self, other):
+    return self.__dict__ == other.__dict__
+
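+# Illustrative example (not part of the original chromite module): 'async' is
+# a reserved keyword in Python 3.7+, so HWTestConfig only accepts it via
+# **kwargs and it must be read back with getattr().
+#
+#   cfg = HWTestConfig('bvt-inline', **{'async': True})
+#   getattr(cfg, 'async')  # -> True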
+
+class NotificationConfig(object):
+  """Config object for defining notification settings.
+
+  Attributes:
+    email: Email address that receives failure notifications.
+    threshold: Number of consecutive failures that should occur in order to
+              be notified. This number should be greater than or equal to 1. If
+              none is specified, default is 1.
+    template: Email template luci-notify should use when sending the email
+              notification. If none is specified, uses the default template.
+  """
+  DEFAULT_TEMPLATE = 'legacy_release'
+  DEFAULT_THRESHOLD = 1
+
+  def __init__(self,
+               email,
+               threshold=DEFAULT_THRESHOLD,
+               template=DEFAULT_TEMPLATE):
+    """Constructor -- see members above."""
+    self.email = email
+    self.threshold = threshold
+    self.template = template
+
+  @property
+  def email_notify(self):
+    return {'email': self.email, 'template': self.template}
+
+  def __eq__(self, other):
+    return self.__dict__ == other.__dict__
+
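+# Illustrative example (not part of the original chromite module):
+# email_notify packages the email and template in the form luci-notify uses.
+#
+#   notify = NotificationConfig('[email protected]', threshold=2)
+#   notify.email_notify
+#   # -> {'email': '[email protected]', 'template': 'legacy_release'}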
+
+def DefaultSettings():
+  # Enumeration of valid settings; any/all config settings must be in this.
+  # All settings must be documented.
+  return dict(
+      # The name of the template we inherit settings from.
+      _template=None,
+
+      # The name of the config.
+      name=None,
+
+      # A list of boards to build.
+      boards=None,
+
+      # A list of ModelTestConfig objects that represent all of the models
+      # supported by a given unified build and their corresponding test config.
+      models=[],
+
+      # This value defines what part of the Golden Eye UI is responsible for
+      # displaying builds of this build config. The value is required, and
+      # must be in ALL_DISPLAY_LABEL.
+      # TODO: Make the value required after crbug.com/776955 is finished.
+      display_label=None,
+
+      # This defines which LUCI Builder to use. It must match an entry in:
+      #
+      # https://chrome-internal.git.corp.google.com/chromeos/
+      #    manifest-internal/+/infra/config/cr-buildbucket.cfg
+      #
+      luci_builder=LUCI_BUILDER_LEGACY_RELEASE,
+
+      # The profile of the variant to set up and build.
+      profile=None,
+
+      # This bot pushes changes to the overlays.
+      master=False,
+
+      # A basic_builder is a special configuration which does not perform tests
+      # or mutate external config.
+      basic_builder=False,
+
+      # If this bot triggers slave builds, this will contain a list of
+      # slave config names.
+      slave_configs=None,
+
+      # If False, this flag indicates that the CQ should not check whether
+      # this bot passed or failed. Set this to False if you are setting up a
+      # new bot. Once the bot is on the waterfall and is consistently green,
+      # mark the builder as important=True.
+      important=True,
+
+      # If True, build config should always be run as if --debug was set
+      # on the cbuildbot command line. This is different from 'important'
+      # and is usually correlated with tryjob build configs.
+      debug=False,
+
+      # If True, use the debug instance of CIDB instead of prod.
+      debug_cidb=False,
+
+      # Timeout for the build as a whole (in seconds).
+      build_timeout=(5 * 60 + 30) * 60,
+
+      # A list of NotificationConfig objects describing who to notify of builder
+      # failures.
+      notification_configs=[],
+
+      # An integer. If this builder fails this many times consecutively, send
+      # an alert email to the recipients health_alert_recipients. This does
+      # not apply to tryjobs. This feature is similar to the ERROR_WATERMARK
+      # feature of upload_symbols, and it may make sense to merge the features
+      # at some point.
+      health_threshold=0,
+
+      # List of email addresses to send health alerts to for this builder. It
+      # supports automatic email address lookup for the following sheriff
+      # types:
+      #     'tree': tree sheriffs
+      #     'chrome': chrome gardeners
+      health_alert_recipients=[],
+
+      # Whether this is an internal build config.
+      internal=False,
+
+      # Whether this is a branched build config. Used for pfq logic.
+      branch=False,
+
+      # The name of the manifest to use. E.g., to use the buildtools manifest,
+      # specify 'buildtools'.
+      manifest=constants.DEFAULT_MANIFEST,
+
+      # emerge use flags to use while setting up the board, building packages,
+      # making images, etc.
+      useflags=[],
+
+      # Set the variable CHROMEOS_OFFICIAL for the build. Known to affect
+      # parallel_emerge, cros_set_lsb_release, and chromeos_version.sh. See
+      # bug chromium-os:14649
+      chromeos_official=False,
+
+      # Use binary packages for building the toolchain. (emerge --getbinpkg)
+      usepkg_toolchain=True,
+
+      # Use binary packages for build_packages and setup_board.
+      usepkg_build_packages=True,
+
+      # Does this profile need to sync chrome?  If None, we guess based on
+      # other factors.  If True/False, we always do that.
+      sync_chrome=None,
+
+      # Use the newest ebuilds for all the toolchain packages.
+      latest_toolchain=False,
+
+      # This is only valid when latest_toolchain is True. If you set this to a
+      # commit-ish, the gcc ebuild will use it to build the toolchain
+      # compiler.
+      gcc_githash=None,
+
+      # Wipe and replace the board inside the chroot.
+      board_replace=False,
+
+      # Wipe and replace chroot, but not source.
+      chroot_replace=True,
+
+      # Create the chroot on a loopback-mounted chroot.img instead of a bare
+      # directory.  Required for snapshots; otherwise optional.
+      chroot_use_image=True,
+
+      # Uprevs the local ebuilds to build new changes since last stable
+      # build.  If master then also pushes these changes on success. Note that
+      # we uprev on just about every bot config because it gives us a more
+      # deterministic build system (the tradeoff being that some bots build
+      # from source more frequently than if they never did an uprev). This way
+      # the release/factory/etc... builders will pick up changes that devs
+      # pushed before it runs, but after the corresponding PFQ bot ran (which
+      # is what creates+uploads binpkgs).  The incremental bots are about the
+      # only ones that don't uprev because they mimic the flow a developer
+      # goes through on their own local systems.
+      uprev=True,
+
+      # Select what overlays to look at for revving and prebuilts. This can be
+      # any constants.VALID_OVERLAYS.
+      overlays=constants.PUBLIC_OVERLAYS,
+
+      # Select what overlays to push at. This should be a subset of overlays
+      # for the particular builder.  Must be None if not a master.  There
+      # should only be one master bot pushing changes to each overlay per
+      # branch.
+      push_overlays=None,
+
+      # Uprev Android, values of 'latest_release', or None.
+      android_rev=None,
+
+      # Which Android branch build do we try to uprev from.
+      android_import_branch=None,
+
+      # Android package name.
+      android_package=None,
+
+      # Uprev Chrome, values of 'tot', 'stable_release', or None.
+      chrome_rev=None,
+
+      # Exit the builder right after checking compilation.
+      # TODO(mtennant): Should be something like "compile_check_only".
+      compilecheck=False,
+
+      # If True, run DebugInfoTest stage.
+      debuginfo_test=False,
+
+      # Runs the tests that the signer would run. This should only be set if
+      # 'recovery' is in images.
+      signer_tests=False,
+
+      # Runs unittests for packages.
+      unittests=True,
+
+      # A list of the packages to blacklist from unittests.
+      unittest_blacklist=[],
+
+      # Generates AFDO data. Will capture a profile of chrome using a hwtest
+      # to run a predetermined set of benchmarks.
+      # FIXME(tcwang): Keep this config during transition to async AFDO
+      afdo_generate=False,
+
+      # Generates AFDO data asynchronously. Will capture a profile of chrome
+      # using a hwtest to run a predetermined set of benchmarks.
+      afdo_generate_async=False,
+
+      # Verify and publish kernel profiles.
+      kernel_afdo_verify=False,
+
+      # Verify and publish chrome profiles.
+      chrome_afdo_verify=False,
+
+      # Generate Chrome orderfile. Will build Chrome with C3 ordering and
+      # generate an orderfile for uploading as a result.
+      orderfile_generate=False,
+
+      # Verify unvetted Chrome orderfile. Will use the most recent unvetted
+      # orderfile and build Chrome. Upload the orderfile to vetted bucket
+      # as a result.
+      orderfile_verify=False,
+
+      # Generates AFDO data, builds the minimum amount of artifacts and
+      # assumes a non-distributed builder (i.e.: the whole process in a single
+      # builder).
+      afdo_generate_min=False,
+
+      # Update the Chrome ebuild with the AFDO profile info.
+      afdo_update_chrome_ebuild=False,
+
+      # Update the kernel ebuild with the AFDO profile info.
+      afdo_update_kernel_ebuild=False,
+
+      # Uses AFDO data. The Chrome build will be optimized using the AFDO
+      # profile information found in Chrome's source tree.
+      afdo_use=True,
+
+      # A list of VMTestConfig objects to run by default.
+      vm_tests=[
+          VMTestConfig(constants.VM_SUITE_TEST_TYPE, test_suite='smoke'),
+          VMTestConfig(constants.SIMPLE_AU_TEST_TYPE)
+      ],
+
+      # A list of all VMTestConfig objects to use if VM Tests are forced on
+      # (--vmtest command line or trybot). None means no override.
+      vm_tests_override=None,
+
+      # If true, in addition to upload vm test result to artifact folder, report
+      # results to other dashboard as well.
+      vm_test_report_to_dashboards=False,
+
+      # The number of times to run the VMTest stage. If this is >1, then we
+      # will run the stage this many times, stopping if we encounter any
+      # failures.
+      vm_test_runs=1,
+
+      # If True, run SkylabHWTestStage instead of HWTestStage for suites that
+      # use pools other than pool:cts.
+      enable_skylab_hw_tests=False,
+
+      # If set, this is the URL of the bug justifying why hw_tests are disabled
+      # on a builder that should always have hw_tests.
+      hw_tests_disabled_bug='',
+
+      # If True, run SkylabHWTestStage instead of HWTestStage for suites that
+      # use pool:cts.
+      enable_skylab_cts_hw_tests=False,
+
+      # A list of HWTestConfig objects to run.
+      hw_tests=[],
+
+      # A list of all HWTestConfig objects to use if HW Tests are forced on
+      # (--hwtest command line or trybot). None means no override.
+      hw_tests_override=None,
+
+      # If true, uploads artifacts for hw testing. Upload payloads for test
+      # image if the image is built. If not, dev image is used and then base
+      # image.
+      upload_hw_test_artifacts=True,
+
+      # If true, uploads individual image tarballs.
+      upload_standalone_images=True,
+
+      # A list of GCETestConfig objects to use. Currently only some lakitu
+      # builders run gce tests.
+      gce_tests=[],
+
+      # Whether to run CPEExport stage. This stage generates portage depgraph
+      # data that is used for bugs reporting (see go/why-cpeexport). Only
+      # release builders should run this stage.
+      run_cpeexport=False,
+
+      # Whether to run BuildConfigsExport stage. This stage generates build
+      # configs (see crbug.com/974795 project). Only release builders should
+      # run this stage.
+      run_build_configs_export=False,
+
+      # A list of TastVMTestConfig objects describing Tast-based test suites
+      # that should be run in a VM.
+      tast_vm_tests=[],
+
+      # Default to not run moblab tests. Currently the blessed moblab board runs
+      # these tests.
+      moblab_vm_tests=[],
+
+      # List of patterns for portage packages for which stripped binpackages
+      # should be uploaded to GS. The patterns are used to search for packages
+      # via `equery list`.
+      upload_stripped_packages=[
+          # Used by SimpleChrome workflow.
+          'chromeos-base/chromeos-chrome',
+          'sys-kernel/*kernel*',
+      ],
+
+      # Google Storage path to offload files to.
+      #   None - No upload
+      #   GS_PATH_DEFAULT - 'gs://chromeos-image-archive/' + bot_id
+      #   value - Upload to explicit path
+      gs_path=GS_PATH_DEFAULT,
+
+      # TODO(sosa): Deprecate binary.
+      # Type of builder.  Check constants.VALID_BUILD_TYPES.
+      build_type=constants.PFQ_TYPE,
+
+      # Whether to schedule test suites by suite_scheduler. Generally only
+      # True for "release" builders.
+      suite_scheduling=False,
+
+      # The class name used to build this config.  See the modules in
+      # cbuildbot / builders/*_builders.py for possible values.  This should
+      # be the name in string form -- e.g. "simple_builders.SimpleBuilder" to
+      # get the SimpleBuilder class in the simple_builders module.  If not
+      # specified, we'll fallback to legacy probing behavior until everyone
+      # has been converted (see the scripts/cbuildbot.py file for details).
+      builder_class_name=None,
+
+      # List of images we want to build -- see build_image for more details.
+      images=['test'],
+
+      # Image from which we will build update payloads.  Must either be None
+      # or name one of the images in the 'images' list, above.
+      payload_image=None,
+
+      # Whether to build a netboot image.
+      factory_install_netboot=True,
+
+      # Whether to build the factory toolkit.
+      factory_toolkit=True,
+
+      # Whether to build factory packages in BuildPackages.
+      factory=True,
+
+      # Flag to control if all packages for the target are built. If disabled
+      # and unittests are enabled, the unit tests and their dependencies
+      # will still be built during the testing stage.
+      build_packages=True,
+
+      # Tuple of specific packages we want to build.  Most configs won't
+      # specify anything here and instead let build_packages calculate.
+      packages=[],
+
+      # Do we push a final release image to chromeos-images.
+      push_image=False,
+
+      # Do we upload debug symbols.
+      upload_symbols=False,
+
+      # Whether we upload a hwqual tarball.
+      hwqual=False,
+
+      # Run a stage that generates release payloads for signed images.
+      paygen=False,
+
+      # If the paygen stage runs, generate tests, and schedule auto-tests for
+      # them.
+      paygen_skip_testing=False,
+
+      # If the paygen stage runs, don't generate any delta payloads. This is
+      # only done if deltas are broken for a given board.
+      paygen_skip_delta_payloads=False,
+
+      # Run a stage that generates and uploads package CPE information.
+      cpe_export=True,
+
+      # Run a stage that generates and uploads debug symbols.
+      debug_symbols=True,
+
+      # Do not package the debug symbols in the binary package. The debug
+      # symbols will be in an archive with the name cpv.debug.tbz2 in
+      # /build/${BOARD}/packages and uploaded with the prebuilt.
+      separate_debug_symbols=True,
+
+      # Include *.debug files for debugging core files with gdb in debug.tgz.
+      # These are very large. This option only has an effect if debug_symbols
+      # and archive are set.
+      archive_build_debug=False,
+
+      # Run a stage that archives build and test artifacts for developer
+      # consumption.
+      archive=True,
+
+      # Git repository URL for our manifests.
+      #  https://chromium.googlesource.com/chromiumos/manifest
+      #  https://chrome-internal.googlesource.com/chromeos/manifest-internal
+      manifest_repo_url=None,
+
+      # Whether we are using the manifest_version repo that stores per-build
+      # manifests.
+      manifest_version=False,
+
+      # Use a different branch of the project manifest for the build.
+      manifest_branch=None,
+
+      # LKGM for Chrome OS generated for Chrome builds that are blessed from
+      # canary runs.
+      use_chrome_lkgm=False,
+
+      # Upload prebuilts for this build. Valid values are PUBLIC, PRIVATE, or
+      # False.
+      prebuilts=False,
+
+      # Use SDK as opposed to building the chroot from source.
+      use_sdk=True,
+
+      # The description string to print out for config when user runs --list.
+      description=None,
+
+      # Boolean that enables parameter --git-sync for upload_prebuilts.
+      git_sync=False,
+
+      # A list of the child config groups, if applicable. See the AddGroup
+      # method.
+      child_configs=[],
+
+      # Whether this config belongs to a config group.
+      grouped=False,
+
+      # layout of build_image resulting image. See
+      # scripts/build_library/legacy_disk_layout.json or
+      # overlay-<board>/scripts/disk_layout.json for possible values.
+      disk_layout=None,
+
+      # If enabled, run the PatchChanges stage.  Enabled by default. Can be
+      # overridden by the --nopatch flag.
+      postsync_patch=True,
+
+      # Reexec into the buildroot after syncing.  Enabled by default.
+      postsync_reexec=True,
+
+      # Run the binhost_test stage. Only makes sense for builders that have no
+      # boards.
+      binhost_test=False,
+
+      # If specified, it is passed on to the PushImage script as '--sign-types'
+      # commandline argument.  Must be either None or a list of image types.
+      sign_types=None,
+
+      # TODO(sosa): Collapse to one option.
+      # ========== Dev installer prebuilts options =======================
+
+      # Upload prebuilts for this build to this bucket. If it equals None the
+      # default buckets are used.
+      binhost_bucket=None,
+
+      # Parameter --key for upload_prebuilts. If it equals None, the default
+      # values are used, which depend on the build type.
+      binhost_key=None,
+
+      # Parameter --binhost-base-url for upload_prebuilts. If it equals None,
+      # the default value is used.
+      binhost_base_url=None,
+
+      # Upload dev installer prebuilts.
+      dev_installer_prebuilts=False,
+
+      # Enable rootfs verification on the image.
+      rootfs_verification=True,
+
+      # Build the Chrome SDK.
+      chrome_sdk=False,
+
+      # If chrome_sdk is set to True, this determines whether we attempt to
+      # build Chrome itself with the generated SDK.
+      chrome_sdk_build_chrome=True,
+
+      # If chrome_sdk is set to True, this determines whether we use goma to
+      # build chrome.
+      chrome_sdk_goma=True,
+
+      # Run image tests. This should only be set if 'base' is in our list of
+      # images.
+      image_test=False,
+
+      # ==================================================================
+      # Workspace related options.
+
+      # Which branch WorkspaceSyncStage should check out, if run.
+      workspace_branch=None,
+
+      # ==================================================================
+      # The documentation associated with the config.
+      doc=None,
+
+      # ==================================================================
+      # The goma related options.
+
+      # Which goma client to use.
+      goma_client_type=None,
+
+      # Try to use goma to build all packages.
+      build_all_with_goma=False,
+
+      # This is a LUCI Scheduler schedule string. Setting this will create
+      # a LUCI Scheduler for this build on swarming (not buildbot).
+      # See: https://goo.gl/VxSzFf
+      schedule=None,
+
+      # This is the list of git repos which can trigger this build in swarming.
+      # Implies that schedule is set, to "triggered".
+      # The format is of the form:
+      #   [ (<git repo url>, (<ref1>, <ref2>, …)),
+      #    …]
+      triggered_gitiles=None,
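+      # Illustrative example (hypothetical values): a builder triggered by
+      # manifest changes might set
+      #   schedule='triggered',
+      #   triggered_gitiles=[
+      #       ('https://chromium.googlesource.com/chromiumos/manifest',
+      #        ('refs/heads/master',)),
+      #   ],
+      # while a timed builder might use a LUCI Scheduler spec such as
+      #   schedule='with 30m interval',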
+
+      # If true, skip package retries in BuildPackages step.
+      nobuildretry=False,
+
+      # Attempt to run this build on the same bot each time it builds.
+      # This is only meaningful for slave builds run on swarming. This
+      # should only be used with LUCI Builders that use a reserved
+      # role to avoid having bots stolen by other builds while
+      # waiting on a new master build.
+      build_affinity=False,
+  )
+
+
+def GerritInstanceParameters(name, instance):
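+  """Build a dict of Gerrit/GoB site parameters for one instance.
+
+  For example, GerritInstanceParameters('EXTERNAL', 'chromium') produces the
+  keys EXTERNAL_GOB_INSTANCE, EXTERNAL_GERRIT_INSTANCE, EXTERNAL_GOB_HOST,
+  EXTERNAL_GERRIT_HOST, EXTERNAL_GOB_URL and EXTERNAL_GERRIT_URL, mapping to
+  'chromium', 'chromium-review', and the corresponding googlesource.com hosts
+  and URLs.
+  """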
+  param_names = [
+      '_GOB_INSTANCE', '_GERRIT_INSTANCE', '_GOB_HOST', '_GERRIT_HOST',
+      '_GOB_URL', '_GERRIT_URL'
+  ]
+
+  gob_instance = instance
+  gerrit_instance = '%s-review' % instance
+  gob_host = constants.GOB_HOST % gob_instance
+  gerrit_host = constants.GOB_HOST % gerrit_instance
+  gob_url = 'https://%s' % gob_host
+  gerrit_url = 'https://%s' % gerrit_host
+
+  params = [
+      gob_instance, gerrit_instance, gob_host, gerrit_host, gob_url, gerrit_url
+  ]
+
+  return dict([('%s%s' % (name, pn), p) for pn, p in zip(param_names, params)])
+
+
+def DefaultSiteParameters():
+  # Enumeration of valid site parameters; any/all site parameters must be here.
+  # All site parameters should be documented.
+  default_site_params = {}
+
+  manifest_project = 'chromiumos/manifest'
+  manifest_int_project = 'chromeos/manifest-internal'
+  external_remote = 'cros'
+  internal_remote = 'cros-internal'
+  chromium_remote = 'chromium'
+  chrome_remote = 'chrome'
+  aosp_remote = 'aosp'
+  weave_remote = 'weave'
+
+  internal_change_prefix = 'chrome-internal:'
+  external_change_prefix = 'chromium:'
+
+  # Gerrit instance site parameters.
+  default_site_params.update(GerritInstanceParameters('EXTERNAL', 'chromium'))
+  default_site_params.update(
+      GerritInstanceParameters('INTERNAL', 'chrome-internal'))
+  default_site_params.update(GerritInstanceParameters('AOSP', 'android'))
+  default_site_params.update(
+      GerritInstanceParameters('WEAVE', 'weave'))
+
+  default_site_params.update(
+      # Parameters to define which manifests to use.
+      MANIFEST_PROJECT=manifest_project,
+      MANIFEST_INT_PROJECT=manifest_int_project,
+      MANIFEST_PROJECTS=(manifest_project, manifest_int_project),
+      MANIFEST_URL=os.path.join(default_site_params['EXTERNAL_GOB_URL'],
+                                manifest_project),
+      MANIFEST_INT_URL=os.path.join(default_site_params['INTERNAL_GERRIT_URL'],
+                                    manifest_int_project),
+
+      # CrOS remotes specified in the manifests.
+      EXTERNAL_REMOTE=external_remote,
+      INTERNAL_REMOTE=internal_remote,
+      GOB_REMOTES={
+          default_site_params['EXTERNAL_GOB_INSTANCE']: external_remote,
+          default_site_params['INTERNAL_GOB_INSTANCE']: internal_remote,
+      },
+      CHROMIUM_REMOTE=chromium_remote,
+      CHROME_REMOTE=chrome_remote,
+      AOSP_REMOTE=aosp_remote,
+      WEAVE_REMOTE=weave_remote,
+
+      # Only remotes listed in CROS_REMOTES are considered branchable.
+      # CROS_REMOTES and BRANCHABLE_PROJECTS must be kept in sync.
+      GERRIT_HOSTS={
+          external_remote: default_site_params['EXTERNAL_GERRIT_HOST'],
+          internal_remote: default_site_params['INTERNAL_GERRIT_HOST'],
+          aosp_remote: default_site_params['AOSP_GERRIT_HOST'],
+          weave_remote: default_site_params['WEAVE_GERRIT_HOST'],
+      },
+      CROS_REMOTES={
+          external_remote: default_site_params['EXTERNAL_GOB_URL'],
+          internal_remote: default_site_params['INTERNAL_GOB_URL'],
+          aosp_remote: default_site_params['AOSP_GOB_URL'],
+          weave_remote: default_site_params['WEAVE_GOB_URL'],
+      },
+      GIT_REMOTES={
+          chromium_remote: default_site_params['EXTERNAL_GOB_URL'],
+          chrome_remote: default_site_params['INTERNAL_GOB_URL'],
+          external_remote: default_site_params['EXTERNAL_GOB_URL'],
+          internal_remote: default_site_params['INTERNAL_GOB_URL'],
+          aosp_remote: default_site_params['AOSP_GOB_URL'],
+          weave_remote: default_site_params['WEAVE_GOB_URL'],
+      },
+
+      # Prefix to distinguish internal and external changes. This is used
+      # when a user specifies a patch with "-g", when generating a key for
+      # a patch to use in our PatchCache, and when displaying a custom
+      # string for the patch.
+      INTERNAL_CHANGE_PREFIX=internal_change_prefix,
+      EXTERNAL_CHANGE_PREFIX=external_change_prefix,
+      CHANGE_PREFIX={
+          external_remote: external_change_prefix,
+          internal_remote: internal_change_prefix,
+      },
+
+      # List of remotes that are okay to include in the external manifest.
+      EXTERNAL_REMOTES=(
+          external_remote, chromium_remote, aosp_remote, weave_remote,
+      ),
+
+      # Mapping 'remote name' -> regexp that matches names of repositories on
+      # that remote that can be branched when creating CrOS branch.
+      # Branching script will actually create a new git ref when branching
+      # these projects. It won't attempt to create a git ref for other projects
+      # that may be mentioned in a manifest. If a remote is missing from this
+      # dictionary, all projects on that remote are considered to not be
+      # branchable.
+      BRANCHABLE_PROJECTS={
+          external_remote: r'(chromiumos|aosp)/(.+)',
+          internal_remote: r'chromeos/(.+)',
+      },
+
+      # Additional parameters used to filter manifests, create modified
+      # manifests, and to branch manifests.
+      MANIFEST_VERSIONS_GOB_URL=('%s/chromiumos/manifest-versions' %
+                                 default_site_params['EXTERNAL_GOB_URL']),
+      MANIFEST_VERSIONS_GOB_URL_TEST=('%s/chromiumos/manifest-versions-test' %
+                                      default_site_params['EXTERNAL_GOB_URL']),
+      MANIFEST_VERSIONS_INT_GOB_URL=('%s/chromeos/manifest-versions' %
+                                     default_site_params['INTERNAL_GOB_URL']),
+      MANIFEST_VERSIONS_INT_GOB_URL_TEST=(
+          '%s/chromeos/manifest-versions-test' %
+          default_site_params['INTERNAL_GOB_URL']),
+      MANIFEST_VERSIONS_GS_URL='gs://chromeos-manifest-versions',
+
+      # Standard directories under buildroot for cloning these repos.
+      EXTERNAL_MANIFEST_VERSIONS_PATH='manifest-versions',
+      INTERNAL_MANIFEST_VERSIONS_PATH='manifest-versions-internal',
+
+      # GS URL in which to archive build artifacts.
+      ARCHIVE_URL='gs://chromeos-image-archive',
+  )
+
+  return default_site_params
+
+
+class SiteConfig(dict):
+  """This holds a set of named BuildConfig values."""
+
+  def __init__(self, defaults=None, templates=None):
+    """Init.
+
+    Args:
+      defaults: Dictionary of key value pairs to use as BuildConfig values.
+                All BuildConfig values should be defined here. If None,
+                DefaultSettings() is used. Most sites should use
+                DefaultSettings(), and then update to add any site-specific
+                values needed.
+      templates: Dictionary of template names to partial BuildConfigs
+                 other BuildConfigs can be based on. Mostly used to reduce
+                 verbosity of the config dump file format.
+    """
+    super(SiteConfig, self).__init__()
+    self._defaults = DefaultSettings()
+    if defaults:
+      self._defaults.update(defaults)
+    self._templates = AttrDict() if templates is None else AttrDict(templates)
+
+  def GetDefault(self):
+    """Create the canonical default build configuration."""
+    # Enumeration of valid settings; any/all config settings must be in this.
+    # All settings must be documented.
+    return BuildConfig(**self._defaults)
+
+  def GetTemplates(self):
+    """Get the templates of the build configs"""
+    return self._templates
+
+  @property
+  def templates(self):
+    return self._templates
+
+  #
+  # Methods for searching a SiteConfig's contents.
+  #
+  def GetBoards(self):
+    """Return an iterable of all boards in the SiteConfig."""
+    return set(
+        itertools.chain.from_iterable(
+            x.boards for x in self.values() if x.boards))
+
+  def FindFullConfigsForBoard(self, board=None):
+    """Returns full builder configs for a board.
+
+    Args:
+      board: The board to match. By default, match all boards.
+
+    Returns:
+      A tuple containing a list of matching external configs and a list of
+      matching internal release configs for a board.
+    """
+    ext_cfgs = []
+    int_cfgs = []
+
+    for name, c in self.items():
+      if c['boards'] and (board is None or board in c['boards']):
+        if name.endswith('-%s' % CONFIG_TYPE_RELEASE) and c['internal']:
+          int_cfgs.append(c.deepcopy())
+        elif name.endswith('-%s' % CONFIG_TYPE_FULL) and not c['internal']:
+          ext_cfgs.append(c.deepcopy())
+
+    return ext_cfgs, int_cfgs
+
+  def FindCanonicalConfigForBoard(self, board, allow_internal=True):
+    """Get the canonical cbuildbot builder config for a board."""
+    ext_cfgs, int_cfgs = self.FindFullConfigsForBoard(board)
+    # If both external and internal builds exist for this board, prefer the
+    # internal one unless instructed otherwise.
+    both = (int_cfgs if allow_internal else []) + ext_cfgs
+
+    if not both:
+      raise ValueError('Invalid board specified: %s.' % board)
+    return both[0]
+
+  def GetSlaveConfigMapForMaster(self,
+                                 master_config,
+                                 options=None,
+                                 important_only=True):
+    """Gets the slave builds triggered by a master config.
+
+    If a master builder also performs a build, it can (incorrectly) return
+    itself.
+
+    Args:
+      master_config: A build config for a master builder.
+      options: The options passed on the commandline. This argument is required
+               for normal operation, but we accept None to assist with
+               testing.
+      important_only: If True, only get the important slaves.
+
+    Returns:
+      A slave_name to slave_config map, corresponding to the slaves for the
+      master represented by master_config.
+
+    Raises:
+      AssertionError if the given config is not a master config or it does
+        not have a manifest_version.
+    """
+    assert master_config.master
+    assert master_config.slave_configs is not None
+
+    slave_name_config_map = {}
+    if options is not None and options.remote_trybot:
+      return {}
+
+    # Look up the build configs for all slaves named by the master.
+    slave_name_config_map = {
+        name: self[name] for name in master_config.slave_configs
+    }
+
+    if important_only:
+      # Remove unimportant configs from the result.
+      slave_name_config_map = {
+          k: v for k, v in slave_name_config_map.items() if v.important
+      }
+
+    return slave_name_config_map
+
+  def GetSlavesForMaster(self, master_config, options=None,
+                         important_only=True):
+    """Get a list of qualified build slave configs given the master_config.
+
+    Args:
+      master_config: A build config for a master builder.
+      options: The options passed on the commandline. This argument is optional,
+               and only makes sense when called from cbuildbot.
+      important_only: If True, only get the important slaves.
+    """
+    slave_map = self.GetSlaveConfigMapForMaster(
+        master_config, options=options, important_only=important_only)
+    return list(slave_map.values())
+
+  #
+  # Methods used when creating a Config programmatically.
+  #
+  def Add(self, name, template=None, *args, **kwargs):
+    """Add a new BuildConfig to the SiteConfig.
+
+    Examples:
+      # Creates default build named foo.
+      site_config.Add('foo')
+
+      # Creates default build with board 'foo_board'
+      site_config.Add('foo',
+                      boards=['foo_board'])
+
+      # Creates build based on template_build for 'foo_board'.
+      site_config.Add('foo',
+                      template_build,
+                      boards=['foo_board'])
+
+      # Creates build based on template for 'foo_board', with mixin.
+      # Inheritance order is default, template, mixin, arguments.
+      site_config.Add('foo',
+                      template_build,
+                      mixin_build_config,
+                      boards=['foo_board'])
+
+      # Creates build without a template but with mixin.
+      # Inheritance order is default, template, mixin, arguments.
+      site_config.Add('foo',
+                      None,
+                      mixin_build_config,
+                      boards=['foo_board'])
+
+    Args:
+      name: The name to label this configuration; this is what cbuildbot
+            would see.
+      template: BuildConfig to use as a template for this build.
+      args: BuildConfigs to patch into this config. First one (if present) is
+            considered the template. See AddTemplate for help on templates.
+      kwargs: BuildConfig values to explicitly set on this config.
+
+    Returns:
+      The BuildConfig just added to the SiteConfig.
+    """
+    assert name not in self, ('%s already exists.' % name)
+
+    inherits, overrides = args, kwargs
+    if template:
+      inherits = (template,) + inherits
+
+    # Make sure we don't ignore that argument silently.
+    if '_template' in overrides:
+      raise ValueError('_template cannot be explicitly set.')
+
+    result = self.GetDefault()
+    result.apply(*inherits, **overrides)
+
+    # Select the template name based on template argument, or nothing.
+    resolved_template = template.get('_template') if template else None
+    assert not resolved_template or resolved_template in self.templates, \
+        '%s inherits from non-template %s' % (name, resolved_template)
+
+    # Our name is passed as an explicit argument. We use the first build
+    # config as our template, or nothing.
+    result['name'] = name
+    result['_template'] = resolved_template
+    self[name] = result
+    return result
+
+  def AddWithoutTemplate(self, name, *args, **kwargs):
+    """Add a config containing only explicitly listed values (no defaults)."""
+    self.Add(name, None, *args, **kwargs)
+
+  def AddGroup(self, name, *args, **kwargs):
+    """Create a new group of build configurations.
+
+    Args:
+      name: The name to label this configuration; this is what cbuildbot
+            would see.
+      args: Configurations to build in this group. The first config in
+            the group is considered the primary configuration and is used
+            for syncing and creating the chroot.
+      kwargs: Override values to use for the parent config.
+
+    Returns:
+      A new BuildConfig instance.
+    """
+    child_configs = [x.deepcopy().apply(grouped=True) for x in args]
+    return self.Add(name, args[0], child_configs=child_configs, **kwargs)
+
+  def AddForBoards(self,
+                   suffix,
+                   boards,
+                   per_board=None,
+                   template=None,
+                   *args,
+                   **kwargs):
+    """Create configs for all boards in |boards|.
+
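+    Examples:
+      # Illustrative sketch (release_template is a hypothetical template);
+      # creates 'foo_board-release' and 'bar_board-release' configs.
+      site_config.AddForBoards('release',
+                               ['foo_board', 'bar_board'],
+                               template=release_template)
+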
+    Args:
+      suffix: Config name is <board>-<suffix>.
+      boards: A list of board names as strings.
+      per_board: A dictionary of board names to BuildConfigs, or None.
+      template: The template to use for all configs created.
+      *args: Mixin templates to apply.
+      **kwargs: Additional keyword arguments to be used in Add.
+
+    Returns:
+      List of the configs created.
+    """
+    result = []
+
+    for board in boards:
+      config_name = '%s-%s' % (board, suffix)
+
+      # Insert the per_board value as the last mixin, if it exists.
+      mixins = args + (dict(boards=[board]),)
+      if per_board and board in per_board:
+        mixins = mixins + (per_board[board],)
+
+      # Create the new config for this board.
+      result.append(self.Add(config_name, template, *mixins, **kwargs))
+
+    return result
+
+  def ApplyForBoards(self, suffix, boards, *args, **kwargs):
+    """Update configs for all boards in |boards|.
+
+    Args:
+      suffix: Config name is <board>-<suffix>.
+      boards: A list of board names as strings.
+      *args: Mixin templates to apply.
+      **kwargs: Additional keyword arguments to apply to each config.
+
+    Returns:
+      List of the configs updated.
+    """
+    result = []
+
+    for board in boards:
+      config_name = '%s-%s' % (board, suffix)
+      assert config_name in self, ('%s does not exist.' % config_name)
+
+      # Update the config for this board.
+      result.append(self[config_name].apply(*args, **kwargs))
+
+    return result
+
+  def AddTemplate(self, name, *args, **kwargs):
+    """Create a template named |name|.
+
+    Templates are used to define common settings that are shared across types
+    of builders. They help reduce duplication in config_dump.json, because we
+    only define the template and its settings once.
+
+    Args:
+      name: The name of the template.
+      args: See the docstring of BuildConfig.derive.
+      kwargs: See the docstring of BuildConfig.derive.
+    """
+    assert name not in self._templates, ('Template %s already exists.' % name)
+
+    template = BuildConfig()
+    template.apply(*args, **kwargs)
+    template['_template'] = name
+    self._templates[name] = template
+
+    return template
+
+  def _MarshalBuildConfig(self, name, config):
+    """Hide the defaults from a given config entry.
+
+    Args:
+      name: Default build name (usually dictionary key).
+      config: A config entry.
+
+    Returns:
+      The same config entry, but without any defaults.
+    """
+    defaults = self.GetDefault()
+    defaults['name'] = name
+
+    template = config.get('_template')
+    if template:
+      defaults.apply(self._templates[template])
+      defaults['_template'] = None
+
+    result = {}
+    for k, v in config.items():
+      if defaults.get(k) != v:
+        if k == 'child_configs':
+          result['child_configs'] = [
+              self._MarshalBuildConfig(name, child) for child in v
+          ]
+        else:
+          result[k] = v
+
+    return result
+
+  def _MarshalTemplates(self):
+    """Return a version of self._templates with only used templates.
+
+    Templates have callables/delete keys resolved against GetDefault() to
+    ensure they can be safely saved to json.
+
+    Returns:
+      Dict copy of self._templates with all unreferenced templates removed.
+    """
+    defaults = self.GetDefault()
+
+    # All templates used. We ignore child configs since they
+    # should exist at top level.
+    used = set(c.get('_template', None) for c in self.values())
+    used.discard(None)
+
+    result = {}
+
+    for name in used:
+      # Expand any special values (callables, etc)
+      expanded = defaults.derive(self._templates[name])
+      # Recover the '_template' value which is filtered out by derive.
+      expanded['_template'] = name
+      # Hide anything that matches the default.
+      save = {k: v for k, v in expanded.items() if defaults.get(k) != v}
+      result[name] = save
+
+    return result
+
+  def SaveConfigToString(self):
+    """Save this Config object to a Json format string."""
+    default = self.GetDefault()
+
+    config_dict = {}
+    config_dict['_default'] = default
+    config_dict['_templates'] = self._MarshalTemplates()
+    for k, v in self.items():
+      config_dict[k] = self._MarshalBuildConfig(k, v)
+
+    return PrettyJsonDict(config_dict)
+
+  def SaveConfigToFile(self, config_file):
+    """Save this Config to a Json file.
+
+    Args:
+      config_file: The file to write to.
+    """
+    json_string = self.SaveConfigToString()
+    osutils.WriteFile(config_file, json_string)
+
+  def DumpExpandedConfigToString(self):
+    """Dump the SiteConfig to Json with all configs full expanded.
+
+    This is intended for debugging default/template behavior. The dumped JSON
+    can't be reloaded (at least not reliably).
+    """
+    return PrettyJsonDict(self)
+
+  def DumpConfigCsv(self):
+    """Dump the SiteConfig to CSV with all configs fully expanded.
+
+    This supports configuration analysis and debugging.
+    """
+    raw_config = json.loads(self.DumpExpandedConfigToString())
+    header_keys = {'builder_name', 'test_type', 'device'}
+    csv_rows = []
+    for builder_name, values in raw_config.items():
+      row = {'builder_name': builder_name}
+      tests = {}
+      raw_devices = []
+      for key, value in values.items():
+        header_keys.add(key)
+        if value:
+          if isinstance(value, list):
+            if '_tests' in key:
+              tests[key] = value
+            elif key == 'models':
+              raw_devices = value
+            else:
+              # Ignoring this for now for test analysis.
+              if key != 'child_configs':
+                row[key] = ' | '.join(str(array_val) for array_val in value)
+          else:
+            row[key] = value
+
+      if tests:
+        for test_type, test_entries in tests.items():
+          for test_entry in test_entries:
+            test_row = copy.deepcopy(row)
+            test_row['test_type'] = test_type
+            raw_test = json.loads(test_entry)
+            for test_key, test_value in raw_test.items():
+              if test_value:
+                header_keys.add(test_key)
+                test_row[test_key] = test_value
+            csv_rows.append(test_row)
+            if raw_devices:
+              for raw_device in raw_devices:
+                device = json.loads(raw_device)
+                test_suite = test_row.get('suite', '')
+                test_suites = device.get('test_suites', [])
+                if test_suite and test_suites and test_suite in test_suites:
+                  device_row = copy.deepcopy(test_row)
+                  device_row['device'] = device['name']
+                  csv_rows.append(device_row)
+      else:
+        csv_rows.append(row)
+
+    csv_result = [','.join(header_keys)]
+    for csv_row in csv_rows:
+      row_values = []
+      for header_key in header_keys:
+        row_values.append('"%s"' % str(csv_row.get(header_key, '')))
+      csv_result.append(','.join(row_values))
+
+    return '\n'.join(csv_result)
+
+
+#
+# Functions related to working with GE Data.
+#
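+# A minimal sketch of the GE build config shape these helpers assume (nested
+# per-config keys shown by their CONFIG_TEMPLATE_* constant names; all values
+# are hypothetical):
+#   {'boards': [{'name': 'foo_board',
+#                CONFIG_TEMPLATE_CONFIGS: [{...}]}],
+#    'reference_board_unified_builds': [...]}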
+
+
+def LoadGEBuildConfigFromFile(
+    build_settings_file=constants.GE_BUILD_CONFIG_FILE):
+  """Load template config dict from a Json encoded file."""
+  json_string = osutils.ReadFile(build_settings_file)
+  return json.loads(json_string)
+
+
+def GeBuildConfigAllBoards(ge_build_config):
+  """Extract a list of board names from the GE Build Config.
+
+  Args:
+    ge_build_config: Dictionary containing the decoded GE configuration file.
+
+  Returns:
+    A list of board names as strings.
+  """
+  return [b['name'] for b in ge_build_config['boards']]
+
+
+def GetUnifiedBuildConfigAllBuilds(ge_build_config):
+  """Extract a list of all unified build configurations.
+
+  This dictionary is based on the JSON defined by the proto generated from
+  GoldenEye.  See cs/crosbuilds.proto
+
+  Args:
+    ge_build_config: Dictionary containing the decoded GE configuration file.
+
+  Returns:
+    A list of unified build configurations (json configs)
+  """
+  return ge_build_config.get('reference_board_unified_builds', [])
+
+
+class BoardGroup(object):
+  """Class holds leader_boards and follower_boards for grouped boards"""
+
+  def __init__(self):
+    self.leader_boards = []
+    self.follower_boards = []
+
+  def AddLeaderBoard(self, board):
+    self.leader_boards.append(board)
+
+  def AddFollowerBoard(self, board):
+    self.follower_boards.append(board)
+
+  def __str__(self):
+    return ('Leader_boards: %s Follower_boards: %s' % (self.leader_boards,
+                                                       self.follower_boards))
+
+
+def GroupBoardsByBuilderAndBoardGroup(board_list):
+  """Group boards by builder and board_group.
+
+  Args:
+    board_list: board list from the template file.
+
+  Returns:
+    builder_group_dict: maps builder to {group_n: board_group_n}
+    builder_ungrouped_dict: maps builder to a list of ungrouped boards
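+
+  For example (illustrative), two boards sharing board_group 'group1' under
+  builder 'RELEASE' end up in one BoardGroup stored as
+  builder_group_dict['RELEASE']['group1'], while boards without a
+  board_group are appended to builder_ungrouped_dict['RELEASE'].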
+  """
+  builder_group_dict = {}
+  builder_ungrouped_dict = {}
+
+  for b in board_list:
+    name = b[CONFIG_TEMPLATE_NAME]
+    # Invalid build configs may be written out with no config templates,
+    # hence the default. See https://crbug.com/1012278.
+    for config in b.get(CONFIG_TEMPLATE_CONFIGS, []):
+      board = {'name': name}
+      board.update(config)
+
+      builder = config[CONFIG_TEMPLATE_BUILDER]
+      if builder not in builder_group_dict:
+        builder_group_dict[builder] = {}
+      if builder not in builder_ungrouped_dict:
+        builder_ungrouped_dict[builder] = []
+
+      board_group = config[CONFIG_TEMPLATE_BOARD_GROUP]
+      if not board_group:
+        builder_ungrouped_dict[builder].append(board)
+        continue
+      if board_group not in builder_group_dict[builder]:
+        builder_group_dict[builder][board_group] = BoardGroup()
+      if config[CONFIG_TEMPLATE_LEADER_BOARD]:
+        builder_group_dict[builder][board_group].AddLeaderBoard(board)
+      else:
+        builder_group_dict[builder][board_group].AddFollowerBoard(board)
+
+  return (builder_group_dict, builder_ungrouped_dict)
+
+
+def GroupBoardsByBuilder(board_list):
+  """Group boards by the 'builder' flag."""
+  builder_to_boards_dict = {}
+
+  for b in board_list:
+    # Invalid build configs may be written out with no configs array, hence
+    # the default. See https://crbug.com/1005803.
+    for config in b.get(CONFIG_TEMPLATE_CONFIGS, []):
+      builder = config[CONFIG_TEMPLATE_BUILDER]
+      if builder not in builder_to_boards_dict:
+        builder_to_boards_dict[builder] = set()
+      builder_to_boards_dict[builder].add(b[CONFIG_TEMPLATE_NAME])
+
+  return builder_to_boards_dict
+
+
+def GetNonUniBuildLabBoardName(board):
+  """Return the board name labeled in the lab for non-unibuild."""
+  # These special strings denote special configurations used in the image;
+  # such images should run on DUTs whose board label omits the suffix.
+  # We strip them from the board name so the lab can handle it correctly.
+  SPECIAL_SUFFIX = [
+      '-arcnext$', '-arcvm$', '-arc-r$', '-arc-r-userdebug$', '-blueznext$',
+      '-kernelnext$', '-kvm$', '-ndktranslation$', '-cfm$', '-campfire$',
+      '-borealis$',
+  ]
+  # ARM64 userspace boards use a '64' suffix, but we can't put that in the
+  # list above because of collisions with boards like kevin-arc64.
+  ARM64_BOARDS = ['cheza64', 'kevin64']
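+  # For example (purely illustrative): 'kevin-arcnext' and 'kevin64' both map
+  # to 'kevin', while a plain board name such as 'eve' is returned unchanged.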
+  for suffix in SPECIAL_SUFFIX:
+    board = re.sub(suffix, '', board)
+  if board in ARM64_BOARDS:
+    # Remove '64' suffix from the board name.
+    board = board[:-2]
+  return board
+
+
+def GetArchBoardDict(ge_build_config):
+  """Get a dict mapping arch types to board names.
+
+  Args:
+    ge_build_config: Dictionary containing the decoded GE configuration file.
+
+  Returns:
+    A dict mapping arch types to board names.
+  """
+  arch_board_dict = {}
+
+  for b in ge_build_config[CONFIG_TEMPLATE_BOARDS]:
+    board_name = b[CONFIG_TEMPLATE_NAME]
+    # Invalid build configs may be written out with no configs array, hence
+    # the default. See https://crbug.com/947712.
+    for config in b.get(CONFIG_TEMPLATE_CONFIGS, []):
+      arch = config[CONFIG_TEMPLATE_ARCH]
+      arch_board_dict.setdefault(arch, set()).add(board_name)
+
+  for b in GetUnifiedBuildConfigAllBuilds(ge_build_config):
+    board_name = b[CONFIG_TEMPLATE_REFERENCE_BOARD_NAME]
+    arch = b[CONFIG_TEMPLATE_ARCH]
+    arch_board_dict.setdefault(arch, set()).add(board_name)
+
+  return arch_board_dict
+
+
+#
+# Functions related to loading/saving Json.
+#
+class ObjectJSONEncoder(json.JSONEncoder):
+  """Json Encoder that encodes objects as their dictionaries."""
+
+  # pylint: disable=method-hidden
+  def default(self, o):
+    return self.encode(o.__dict__)
+
+
+def PrettyJsonDict(dictionary):
+  """Returns a pretty-ified json dump of a dictionary."""
+  return json.dumps(
+      dictionary,
+      cls=ObjectJSONEncoder,
+      sort_keys=True,
+      indent=4,
+      separators=(',', ': ')) + '\n'
+
+
+def LoadConfigFromFile(config_file=constants.CHROMEOS_CONFIG_FILE):
+  """Load a Config a Json encoded file."""
+  json_string = osutils.ReadFile(config_file)
+  return LoadConfigFromString(json_string)
+
+
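+# The serialized config (e.g. config_dump.json) is a single JSON object: the
+# '_default' entry holds the default settings, the '_templates' entry holds
+# template definitions, and every other key is a build config name mapping to
+# its non-default values. A purely illustrative sketch:
+#   {"_default": {...},
+#    "_templates": {"release": {...}},
+#    "foo_board-release": {"_template": "release", "boards": ["foo_board"]}}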
+def LoadConfigFromString(json_string):
+  """Load a cbuildbot config from it's Json encoded string."""
+  config_dict = json.loads(json_string)
+
+  # Use standard defaults, but allow the config to override.
+  defaults = DefaultSettings()
+  defaults.update(config_dict.pop(DEFAULT_BUILD_CONFIG))
+  _DeserializeConfigs(defaults)
+
+  templates = config_dict.pop('_templates', {})
+  for t in templates.values():
+    _DeserializeConfigs(t)
+
+  defaultBuildConfig = BuildConfig(**defaults)
+
+  builds = {
+      n: _CreateBuildConfig(n, defaultBuildConfig, v, templates)
+      for n, v in config_dict.items()
+  }
+
+  # config is the struct that holds the complete cbuildbot config.
+  result = SiteConfig(defaults=defaults, templates=templates)
+  result.update(builds)
+
+  return result
+
+
+def _DeserializeConfig(build_dict,
+                       config_key,
+                       config_class,
+                       preserve_none=False):
+  """Deserialize config of given type inside build_dict.
+
+  Args:
+    build_dict: The build_dict to update (in place)
+    config_key: Key for the config inside build_dict.
+    config_class: The class to instantiate for the config.
+    preserve_none: If True, None values are preserved as is. By default, they
+        are dropped.
+  """
+  serialized_configs = build_dict.pop(config_key, None)
+  if serialized_configs is None:
+    if preserve_none:
+      build_dict[config_key] = None
+    return
+
+  deserialized_configs = []
+  for config_string in serialized_configs:
+    if isinstance(config_string, config_class):
+      deserialized_config = config_string
+    else:
+      # Each test config is dumped as a json string embedded in json.
+      embedded_configs = json.loads(config_string)
+      deserialized_config = config_class(**embedded_configs)
+    deserialized_configs.append(deserialized_config)
+  build_dict[config_key] = deserialized_configs
+
+
+def _DeserializeConfigs(build_dict):
+  """Updates a config dictionary with recreated objects.
+
+  Notification configs and various test configs are serialized as strings
+  (rather than JSON objects), so we need to turn them into real objects before
+  they can be consumed.
+
+  Args:
+    build_dict: The config dictionary to update (in place).
+  """
+  _DeserializeConfig(build_dict, 'vm_tests', VMTestConfig)
+  _DeserializeConfig(
+      build_dict, 'vm_tests_override', VMTestConfig, preserve_none=True)
+  _DeserializeConfig(build_dict, 'models', ModelTestConfig)
+  _DeserializeConfig(build_dict, 'hw_tests', HWTestConfig)
+  _DeserializeConfig(
+      build_dict, 'hw_tests_override', HWTestConfig, preserve_none=True)
+  _DeserializeConfig(build_dict, 'gce_tests', GCETestConfig)
+  _DeserializeConfig(build_dict, 'tast_vm_tests', TastVMTestConfig)
+  _DeserializeConfig(build_dict, 'moblab_vm_tests', MoblabVMTestConfig)
+  _DeserializeConfig(build_dict, 'notification_configs', NotificationConfig)
+
+
+def _CreateBuildConfig(name, default, build_dict, templates):
+  """Create a BuildConfig object from it's parsed JSON dictionary encoding."""
+  # These build config values need special handling.
+  child_configs = build_dict.pop('child_configs', None)
+  template = build_dict.get('_template')
+
+  # Use the name passed in as the default build name.
+  build_dict.setdefault('name', name)
+
+  result = default.deepcopy()
+  # Use update to explicitly avoid apply's special handling.
+  if template:
+    result.update(templates[template])
+  result.update(build_dict)
+
+  _DeserializeConfigs(result)
+
+  if child_configs is not None:
+    result['child_configs'] = [
+        _CreateBuildConfig(name, default, child, templates)
+        for child in child_configs
+    ]
+
+  return result
+
+
[email protected]
+def GetConfig():
+  """Load the current SiteConfig.
+
+  Returns:
+    SiteConfig instance to use for this build.
+  """
+  return LoadConfigFromFile(constants.CHROMEOS_CONFIG_FILE)
+
+
[email protected]
+def GetSiteParams():
+  """Get the site parameter configs.
+
+  This is the new, preferred method of accessing the site parameters, instead of
+  SiteConfig.params.
+
+  Returns:
+    AttrDict of site parameters
+  """
+  site_params = AttrDict()
+  site_params.update(DefaultSiteParameters())
+  return site_params
+
+
+def append_useflags(useflags):
+  """Used to append a set of useflags to existing useflags.
+
+  Useflags that shadow prior use flags will cause the prior flag to be removed.
+  (e.g. appending '-foo' to 'foo' will cause 'foo' to be removed)
+
+  Examples:
+    new_config = base_config.derive(useflags=append_useflags(['foo', '-bar']))
+
+  Args:
+    useflags: List of string useflags to append.
+  """
+  assert isinstance(useflags, (list, set))
+  shadowed_useflags = {
+      '-' + flag for flag in useflags if not flag.startswith('-')
+  }
+  shadowed_useflags.update(
+      {flag[1:] for flag in useflags if flag.startswith('-')})
+
+  def handler(old_useflags):
+    new_useflags = set(old_useflags or [])
+    new_useflags.update(useflags)
+    new_useflags.difference_update(shadowed_useflags)
+    return sorted(list(new_useflags))
+
+  return handler
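+
+
+# Illustrative sketch of append_useflags in action (names are hypothetical):
+#   handler = append_useflags(['-foo', 'bar'])
+#   handler(['foo', 'baz'])
+#   # -> ['-foo', 'bar', 'baz'] ('-foo' shadows the existing 'foo').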
diff --git a/utils/frozen_chromite/lib/constants.py b/utils/frozen_chromite/lib/constants.py
new file mode 100644
index 0000000..86f1330
--- /dev/null
+++ b/utils/frozen_chromite/lib/constants.py
@@ -0,0 +1,983 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This module contains constants used by cbuildbot and related code."""
+
+from __future__ import print_function
+
+import itertools
+import os
+
+def _FindSourceRoot():
+  """Try and find the root check out of the chromiumos tree"""
+  source_root = path = os.path.realpath(os.path.join(
+      os.path.abspath(__file__), '..', '..', '..'))
+  while True:
+    if os.path.isdir(os.path.join(path, '.repo')):
+      return path
+    elif path == '/':
+      break
+    path = os.path.dirname(path)
+  return source_root
+
+
+SOURCE_ROOT = _FindSourceRoot()
+CHROOT_SOURCE_ROOT = '/mnt/host/source'
+CHROOT_CACHE_ROOT = '/var/cache/chromeos-cache'
+DEPOT_TOOLS_SUBPATH = 'src/chromium/depot_tools'
+
+CROSUTILS_DIR = os.path.join(SOURCE_ROOT, 'src/scripts')
+CHROMITE_DIR = os.path.realpath(os.path.join(
+    os.path.abspath(__file__), '..', '..'))
+BOOTSTRAP_DIR = os.path.join(CHROMITE_DIR, 'bootstrap')
+DEPOT_TOOLS_DIR = os.path.join(SOURCE_ROOT, DEPOT_TOOLS_SUBPATH)
+CHROMITE_BIN_SUBDIR = 'chromite/bin'
+CHROMITE_BIN_DIR = os.path.join(CHROMITE_DIR, 'bin')
+CHROMITE_SCRIPTS_DIR = os.path.join(CHROMITE_DIR, 'scripts')
+PATH_TO_CBUILDBOT = os.path.join(CHROMITE_BIN_SUBDIR, 'cbuildbot')
+DEFAULT_CHROOT_DIR = 'chroot'
+DEFAULT_CHROOT_PATH = os.path.join(SOURCE_ROOT, DEFAULT_CHROOT_DIR)
+TERMINA_TOOLS_DIR = os.path.join(
+    SOURCE_ROOT, 'src/platform/container-guest-tools/termina')
+
+STATEFUL_DIR = '/mnt/stateful_partition'
+
+# These constants are defined and used in the die_hook that logs failed
+# packages: 'cros_log_failed_packages' in profiles/base/profile.bashrc in
+# chromiumos-overlay. The status file is generated in CROS_METRICS_DIR, and
+# only if that environment variable is defined.
+CROS_METRICS_DIR_ENVVAR = 'CROS_METRICS_DIR'
+DIE_HOOK_STATUS_FILE_NAME = 'FAILED_PACKAGES'
+
+CHROMEOS_CONFIG_FILE = os.path.join(CHROMITE_DIR, 'config', 'config_dump.json')
+WATERFALL_CONFIG_FILE = os.path.join(
+    CHROMITE_DIR, 'config', 'waterfall_layout_dump.txt')
+LUCI_SCHEDULER_CONFIG_FILE = os.path.join(
+    CHROMITE_DIR, 'config', 'luci-scheduler.cfg')
+
+GE_BUILD_CONFIG_FILE = os.path.join(
+    CHROMITE_DIR, 'config', 'ge_build_config.json')
+
+# The following define the location for storing toolchain packages and
+# SDK overlay tarballs created during SDK builder runs. The paths are relative
+# to the build root's chroot, which guarantees that they are reachable from it
+# and get cleaned up when it is removed.
+SDK_TOOLCHAINS_OUTPUT = 'tmp/toolchain-pkgs'
+SDK_OVERLAYS_OUTPUT = 'tmp/sdk-overlays'
+
+AUTOTEST_BUILD_PATH = 'usr/local/build/autotest'
+UNITTEST_PKG_PATH = 'test-packages'
+
+# Only used for testing pinned images on test images.
+GUEST_IMAGES_PINS_PATH = 'usr/local/opt/google/containers/pins'
+PIN_KEY_FILENAME = 'filename'
+PIN_KEY_GSURI = 'gsuri'
+
+# Path to the lsb-release file on the device.
+LSB_RELEASE_PATH = '/etc/lsb-release'
+
+HOME_DIRECTORY = os.path.expanduser('~')
+
+# If cbuildbot is running on a bot, then the cidb access credentials will be
+# available here. This directory will not exist otherwise.
+CIDB_PROD_BOT_CREDS = os.path.join(HOME_DIRECTORY, '.cidb_creds',
+                                   'prod_cidb_bot')
+CIDB_DEBUG_BOT_CREDS = os.path.join(HOME_DIRECTORY, '.cidb_creds',
+                                    'debug_cidb_bot')
+
+# Crash Server upload API key.
+CRASH_API_KEY = os.path.join('/', 'creds', 'api_keys',
+                             'api_key-chromeos-crash-uploader')
+
+# Buildbucket build status
+BUILDBUCKET_BUILDER_STATUS_SCHEDULED = 'SCHEDULED'
+BUILDBUCKET_BUILDER_STATUS_STARTED = 'STARTED'
+BUILDBUCKET_BUILDER_STATUS_COMPLETED = 'COMPLETED'
+
+BUILDBUCKET_BUILDER_STATUSES = (BUILDBUCKET_BUILDER_STATUS_SCHEDULED,
+                                BUILDBUCKET_BUILDER_STATUS_STARTED,
+                                BUILDBUCKET_BUILDER_STATUS_COMPLETED)
+
+BUILDBUCKET_BUILDER_RESULT_SUCCESS = 'SUCCESS'
+BUILDBUCKET_BUILDER_RESULT_FAILURE = 'FAILURE'
+BUILDBUCKET_BUILDER_RESULT_CANCELED = 'CANCELED'
+
+# Builder status strings
+BUILDER_STATUS_FAILED = 'fail'
+BUILDER_STATUS_PASSED = 'pass'
+BUILDER_STATUS_INFLIGHT = 'inflight'
+BUILDER_STATUS_MISSING = 'missing'
+BUILDER_STATUS_ABORTED = 'aborted'
+# The following statuses are currently only used for build stages.
+BUILDER_STATUS_PLANNED = 'planned'
+BUILDER_STATUS_WAITING = 'waiting'
+BUILDER_STATUS_SKIPPED = 'skipped'
+BUILDER_STATUS_FORGIVEN = 'forgiven'
+BUILDER_COMPLETED_STATUSES = (BUILDER_STATUS_PASSED,
+                              BUILDER_STATUS_FAILED,
+                              BUILDER_STATUS_ABORTED,
+                              BUILDER_STATUS_SKIPPED,
+                              BUILDER_STATUS_FORGIVEN)
+BUILDER_ALL_STATUSES = (BUILDER_STATUS_FAILED,
+                        BUILDER_STATUS_PASSED,
+                        BUILDER_STATUS_INFLIGHT,
+                        BUILDER_STATUS_MISSING,
+                        BUILDER_STATUS_ABORTED,
+                        BUILDER_STATUS_WAITING,
+                        BUILDER_STATUS_PLANNED,
+                        BUILDER_STATUS_SKIPPED,
+                        BUILDER_STATUS_FORGIVEN)
+BUILDER_NON_FAILURE_STATUSES = (BUILDER_STATUS_PLANNED,
+                                BUILDER_STATUS_PASSED,
+                                BUILDER_STATUS_SKIPPED,
+                                # Quick fix for Buildbucket race problems.
+                                BUILDER_STATUS_INFLIGHT,
+                                BUILDER_STATUS_FORGIVEN)
+
+# Partition labels
+CROS_PART_STATEFUL = 'STATE'
+
+# Signer status strings
+SIGNER_STATUS_PASSED = 'passed'
+SIGNER_STATUS_FAILED = 'failed'
+
+# Change sources
+CHANGE_SOURCE_INTERNAL = 'internal'
+CHANGE_SOURCE_EXTERNAL = 'external'
+
+# Exception categories, as recorded in cidb
+EXCEPTION_CATEGORY_UNKNOWN = 'unknown'
+EXCEPTION_CATEGORY_BUILD = 'build'
+EXCEPTION_CATEGORY_TEST = 'test'
+EXCEPTION_CATEGORY_INFRA = 'infra'
+EXCEPTION_CATEGORY_LAB = 'lab'
+
+EXCEPTION_CATEGORY_ALL_CATEGORIES = (
+    EXCEPTION_CATEGORY_UNKNOWN,
+    EXCEPTION_CATEGORY_BUILD,
+    EXCEPTION_CATEGORY_TEST,
+    EXCEPTION_CATEGORY_INFRA,
+    EXCEPTION_CATEGORY_LAB,
+)
+
+# Monarch metric names
+MON_LAST_SLAVE = 'chromeos/cbuildbot/last_completed_slave'
+MON_BUILD_COMP_COUNT = 'chromeos/cbuildbot/build/completed_count'
+MON_BUILD_DURATION = 'chromeos/cbuildbot/build/durations'
+MON_STAGE_COMP_COUNT = 'chromeos/cbuildbot/stage/completed_count'
+MON_STAGE_DURATION = 'chromeos/cbuildbot/stage/durations'
+MON_STAGE_INSTANCE_DURATION = 'chromeos/cbuildbot/stage/instance_durations'
+MON_STAGE_FAILURE_COUNT = 'chromeos/cbuildbot/stage/failure_count'
+MON_FAILED_STAGE = 'chromeos/chromite/cbuildbot_launch/failed_stage'
+MON_CHROOT_USED = 'chromeos/cbuildbot/chroot_at_version'
+MON_REPO_SYNC_COUNT = 'chromeos/cbuildbot/repo/sync_count'
+MON_REPO_SYNC_RETRY_COUNT = 'chromeos/cbuildbot/repo/sync_retry_count'
+MON_REPO_SELFUPDATE_FAILURE_COUNT = ('chromeos/cbuildbot/repo/'
+                                     'selfupdate_failure_count')
+MON_REPO_INIT_RETRY_COUNT = 'chromeos/cbuildbot/repo/init_retry_count'
+MON_REPO_MANIFEST_FAILURE_COUNT = ('chromeos/cbuildbot/repo/'
+                                   'manifest_failure_count')
+MON_BB_RETRY_BUILD_COUNT = ('chromeos/cbuildbot/buildbucket/'
+                            'retry_build_count')
+MON_BB_CANCEL_BATCH_BUILDS_COUNT = ('chromeos/cbuildbot/buildbucket/'
+                                    'cancel_batch_builds_count')
+MON_EXPORT_TO_GCLOUD = 'chromeos/cbuildbot/export_to_gcloud'
+
+# Stage Categorization for failed stages metric.
+UNCATEGORIZED_STAGE = 'Uncategorized'
+CI_INFRA_STAGE = 'CI-Infra'
+TEST_INFRA_STAGE = 'Test-Infra'
+PRODUCT_OS_STAGE = 'Product-OS'
+PRODUCT_ANDROID_STAGE = 'Product-Android'
+PRODUCT_CHROME_STAGE = 'Product-Chrome'
+PRODUCT_TOOLCHAIN_STAGE = 'Product-Toolchain'
+
+
+# Re-execution API constants.
+# Used by --resume and --bootstrap to decipher which options they
+# can pass to the target cbuildbot (since it may not have that
+# option).
+# Format is Major.Minor.  Minor is used for tracking new options added
+# that aren't critical to the older version if it's not run.
+# Major is used for tracking heavy API breakage; for example, no longer
+# supporting the --resume option.
+REEXEC_API_MAJOR = 0
+REEXEC_API_MINOR = 12
+REEXEC_API_VERSION = '%i.%i' % (REEXEC_API_MAJOR, REEXEC_API_MINOR)
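+# With the values above, REEXEC_API_VERSION evaluates to '0.12'.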
+
+# Support --master-build-id
+REEXEC_API_MASTER_BUILD_ID = 3
+# Support --git-cache-dir
+REEXEC_API_GIT_CACHE_DIR = 4
+# Support --goma_dir and --goma_client_json
+REEXEC_API_GOMA = 5
+# Support --ts-mon-task-num
+REEXEC_API_TSMON_TASK_NUM = 6
+# Support --sanity-check-build
+REEXEC_API_SANITY_CHECK_BUILD = 7
+# Support --previous-build-state
+REEXEC_API_PREVIOUS_BUILD_STATE = 8
+# Support --workspace
+REEXEC_API_WORKSPACE = 9
+# Support --master-buildbucket-id
+REEXEC_API_MASTER_BUILDBUCKET_ID = 10
+# Support --chromeos_goma_dir
+REEXEC_API_CHROMEOS_GOMA_DIR = 11
+# Support --chrome-preload-dir
+REEXEC_API_CHROME_PRELOAD_DIR = 12
+
+# We rely on the (waterfall, builder name, build number) to uniquely identify
+# a build. However, future migrations or state wipes of the buildbot master may
+# cause it to reset its build number counter. When that happens, this value
+# should be incremented, ensuring that (waterfall, builder name, build number,
+# buildbot generation) is a unique identifier of builds.
+BUILDBOT_GENERATION = 1
+
+ISOLATESERVER = 'https://isolateserver.appspot.com'
+
+GOOGLE_EMAIL = '@google.com'
+CHROMIUM_EMAIL = '@chromium.org'
+
+CORP_DOMAIN = 'corp.google.com'
+GOLO_DOMAIN = 'golo.chromium.org'
+CHROME_DOMAIN = 'chrome.' + CORP_DOMAIN
+CHROMEOS_BOT_INTERNAL = 'chromeos-bot.internal'
+
+GOB_HOST = '%s.googlesource.com'
+
+EXTERNAL_GOB_INSTANCE = 'chromium'
+EXTERNAL_GERRIT_INSTANCE = 'chromium-review'
+EXTERNAL_GOB_HOST = GOB_HOST % EXTERNAL_GOB_INSTANCE
+EXTERNAL_GERRIT_HOST = GOB_HOST % EXTERNAL_GERRIT_INSTANCE
+EXTERNAL_GOB_URL = 'https://%s' % EXTERNAL_GOB_HOST
+EXTERNAL_GERRIT_URL = 'https://%s' % EXTERNAL_GERRIT_HOST
+
+INTERNAL_GOB_INSTANCE = 'chrome-internal'
+INTERNAL_GERRIT_INSTANCE = 'chrome-internal-review'
+INTERNAL_GOB_HOST = GOB_HOST % INTERNAL_GOB_INSTANCE
+INTERNAL_GERRIT_HOST = GOB_HOST % INTERNAL_GERRIT_INSTANCE
+INTERNAL_GOB_URL = 'https://%s' % INTERNAL_GOB_HOST
+INTERNAL_GERRIT_URL = 'https://%s' % INTERNAL_GERRIT_HOST
+
+# Tests without a 'cheets_CTS_' or 'cheets_GTS.' prefix will not be considered
+# CTS/GTS tests in chromite.lib.cts_helper.
+DEFAULT_CTS_TEST_XML_MAP = {
+    'cheets_CTS_': 'test_result.xml',
+    'cheets_GTS.': 'test_result.xml',
+    'cheets_GTS_': 'test_result.xml',
+}
+# Google Storage bucket URI to store results in.
+DEFAULT_CTS_RESULTS_GSURI = 'gs://chromeos-cts-results/'
+DEFAULT_CTS_APFE_GSURI = 'gs://chromeos-cts-apfe/'
+
+ANDROID_CONTAINER_PACKAGE_KEYWORD = 'android-container'
+ANDROID_VM_PACKAGE_KEYWORD = 'android-vm'
+
+ANDROID_BUCKET_URL = 'gs://android-build-chromeos/builds'
+ANDROID_PI_BUILD_BRANCH = 'git_pi-arc'
+ANDROID_VMRVC_BUILD_BRANCH = 'git_rvc-arc-dev'
+ANDROID_VMMST_BUILD_BRANCH = 'git_master-arc-dev'
+
+ANDROID_PI_BUILD_TARGETS = {
+    # Roll XkbToKcmConverter with the system image. It's a host executable
+    # and doesn't depend on the target as long as it's the pi-arc branch. The
+    # converter is ARC specific and not a part of the Android SDK. Having a
+    # custom target like SDK_TOOLS might be better in the long term, but
+    # let's use one from the ARM or X86 target as there are no other similar
+    # executables right now.
+    # We put it in two buckets because we have separate ACLs for arm and x86.
+    # http://b/128405786
+    'APPS': ('linux-apps', 'org.chromium.arc.cachebuilder.jar'),
+    'ARM': ('linux-cheets_arm-user', r'(\.zip|/XkbToKcmConverter)$'),
+    'ARM64': ('linux-cheets_arm64-user', r'(\.zip|/XkbToKcmConverter)$'),
+    'X86': ('linux-cheets_x86-user', r'(\.zip|/XkbToKcmConverter)$'),
+    'X86_64': ('linux-cheets_x86_64-user', r'\.zip$'),
+    'ARM_USERDEBUG': ('linux-cheets_arm-userdebug', r'\.zip$'),
+    'ARM64_USERDEBUG': ('linux-cheets_arm64-userdebug', r'\.zip$'),
+    'X86_USERDEBUG': ('linux-cheets_x86-userdebug', r'\.zip$'),
+    'X86_64_USERDEBUG': ('linux-cheets_x86_64-userdebug', r'\.zip$'),
+    'SDK_GOOGLE_X86_USERDEBUG': ('linux-sdk_cheets_x86-userdebug', r'\.zip$'),
+    'SDK_GOOGLE_X86_64_USERDEBUG': ('linux-sdk_cheets_x86_64-userdebug',
+                                    r'\.zip$'),
+}
+ANDROID_VMMST_BUILD_TARGETS = {
+    # For XkbToKcmConverter, see the comment in ANDROID_PI_BUILD_TARGETS.
+    'X86_64_USERDEBUG': ('linux-bertha_x86_64-userdebug',
+                         r'(\.zip|/XkbToKcmConverter)$'),
+}
+ANDROID_VMRVC_BUILD_TARGETS = {
+    # For XkbToKcmConverter, see the comment in ANDROID_PI_BUILD_TARGETS.
+    'APPS': ('linux-apps', 'org.chromium.arc.cachebuilder.jar'),
+    'ARM64': ('linux-bertha_arm64-user', r'(\.zip|/XkbToKcmConverter)$'),
+    'X86_64': ('linux-bertha_x86_64-user', r'(\.zip|/XkbToKcmConverter)$'),
+    'ARM64_USERDEBUG': ('linux-bertha_arm64-userdebug',
+                        r'(\.zip|/XkbToKcmConverter)$'),
+    'X86_64_USERDEBUG': ('linux-bertha_x86_64-userdebug',
+                         r'(\.zip|/XkbToKcmConverter)$'),
+}
+
+# These refer to *_TARGET variables in Android ebuild files, used when
+# parsing ebuilds to determine the corresponding Android branch.
+# NOTE: We may use the `|` operator to union dict keys after we have
+# completely moved to Python 3.
+ANDROID_ALL_BUILD_TARGETS = frozenset(
+    x + '_TARGET' for x in itertools.chain(
+        ANDROID_PI_BUILD_TARGETS,
+        ANDROID_VMMST_BUILD_TARGETS,
+        ANDROID_VMRVC_BUILD_TARGETS,
+    )
+)
+
+ARC_BUCKET_URL = 'gs://chromeos-arc-images/builds'
+ARC_BUCKET_ACLS = {
+    'APPS': 'googlestorage_acl_public.txt',
+    'ARM': 'googlestorage_acl_arm.txt',
+    'ARM64': 'googlestorage_acl_arm.txt',
+    'X86': 'googlestorage_acl_x86.txt',
+    'X86_64': 'googlestorage_acl_x86.txt',
+    'ARM_USERDEBUG': 'googlestorage_acl_arm.txt',
+    'ARM64_USERDEBUG': 'googlestorage_acl_arm.txt',
+    'X86_USERDEBUG': 'googlestorage_acl_x86.txt',
+    'X86_64_USERDEBUG': 'googlestorage_acl_x86.txt',
+    'SDK_GOOGLE_X86_USERDEBUG': 'googlestorage_acl_x86.txt',
+    'SDK_GOOGLE_X86_64_USERDEBUG': 'googlestorage_acl_x86.txt',
+}
+ANDROID_SYMBOLS_URL_TEMPLATE = (
+    ARC_BUCKET_URL +
+    '/%(branch)s-linux-%(target)s_%(arch)s-%(variant)s/%(version)s'
+    '/%(target)s_%(arch)s%(suffix)s-symbols-%(version)s.zip')
+ANDROID_SYMBOLS_FILE = 'android-symbols.zip'
+# x86-user, x86-userdebug and x86-eng builders create build artifacts with the
+# same name, e.g. cheets_x86-target_files-${VERSION}.zip. Chrome OS builders
+# that need to select x86-user or x86-userdebug artifacts at emerge time need
+# the artifacts to have different filenames to avoid checksum failures. These
+# targets will have their artifacts renamed when the PFQ copies them from
+# the Android bucket to the ARC++ bucket (b/33072485).
+ARC_BUILDS_NEED_ARTIFACTS_RENAMED = {
+    'ARM_USERDEBUG',
+    'ARM64_USERDEBUG',
+    'X86_USERDEBUG',
+    'X86_64_USERDEBUG',
+    'SDK_GOOGLE_X86_USERDEBUG',
+    'SDK_GOOGLE_X86_64_USERDEBUG',
+}
+# All builds will have the same name without a target prefix.
+# Emerge checksum failures are worked around with the ebuild rename symbol (->).
+ARC_ARTIFACTS_RENAME_NOT_NEEDED = [
+    'push_to_device.zip',
+    'sepolicy.zip',
+    'XkbToKcmConverter',
+]
+
+GOB_COOKIE_PATH = os.path.expanduser('~/.git-credential-cache/cookie')
+GITCOOKIES_PATH = os.path.expanduser('~/.gitcookies')
+
+# Timestamps in the JSON from GoB's web interface are of the form 'Tue
+# Dec 02 17:48:06 2014' and are assumed to be in UTC.
+GOB_COMMIT_TIME_FORMAT = '%a %b %d %H:%M:%S %Y'
+
+CHROMITE_PROJECT = 'chromiumos/chromite'
+CHROMITE_URL = '%s/%s' % (EXTERNAL_GOB_URL, CHROMITE_PROJECT)
+CHROMIUM_SRC_PROJECT = 'chromium/src'
+CHROMIUM_GOB_URL = '%s/%s.git' % (EXTERNAL_GOB_URL, CHROMIUM_SRC_PROJECT)
+CHROME_INTERNAL_PROJECT = 'chrome/src-internal'
+CHROME_INTERNAL_GOB_URL = '%s/%s.git' % (
+    INTERNAL_GOB_URL, CHROME_INTERNAL_PROJECT)
+
+DEFAULT_MANIFEST = 'default.xml'
+OFFICIAL_MANIFEST = 'official.xml'
+LKGM_MANIFEST = 'LKGM/lkgm.xml'
+
+SHARED_CACHE_ENVVAR = 'CROS_CACHEDIR'
+PARALLEL_EMERGE_STATUS_FILE_ENVVAR = 'PARALLEL_EMERGE_STATUS_FILE'
+
+# These projects can be responsible for infra failures.
+INFRA_PROJECTS = (CHROMITE_PROJECT,)
+
+# The manifest contains extra attributes in the 'project' nodes to determine our
+# branching strategy for the project.
+#   create: Create a new branch on the project repo for the new CrOS branch.
+#           This is the default.
+#   pin: On the CrOS branch, pin the project to the current revision.
+#   tot: On the CrOS branch, the project still tracks ToT.
+MANIFEST_ATTR_BRANCHING = 'branch-mode'
+MANIFEST_ATTR_BRANCHING_CREATE = 'create'
+MANIFEST_ATTR_BRANCHING_PIN = 'pin'
+MANIFEST_ATTR_BRANCHING_TOT = 'tot'
+MANIFEST_ATTR_BRANCHING_ALL = (
+    MANIFEST_ATTR_BRANCHING_CREATE,
+    MANIFEST_ATTR_BRANCHING_PIN,
+    MANIFEST_ATTR_BRANCHING_TOT,
+)
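+# For example (illustrative), a project entry in the manifest might look like
+#   <project name="chromiumos/third_party/kernel" ... branch-mode="pin" />
+# to keep that project pinned at its current revision on the new CrOS branch.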
+
+STREAK_COUNTERS = 'streak_counters'
+
+PATCH_BRANCH = 'patch_branch'
+STABLE_EBUILD_BRANCH = 'stabilizing_branch'
+MERGE_BRANCH = 'merge_branch'
+
+# These branches are deleted at the beginning of every buildbot run.
+CREATED_BRANCHES = [
+    PATCH_BRANCH,
+    STABLE_EBUILD_BRANCH,
+    MERGE_BRANCH
+]
+
+# Default OS target packages.
+TARGET_OS_PKG = 'virtual/target-os'
+TARGET_OS_DEV_PKG = 'virtual/target-os-dev'
+TARGET_OS_TEST_PKG = 'virtual/target-os-test'
+TARGET_OS_FACTORY_PKG = 'virtual/target-os-factory'
+
+# Constants for uprevving Chrome
+
+CHROMEOS_BASE = 'chromeos-base'
+
+# Portage category and package name for Chrome.
+CHROME_CN = CHROMEOS_BASE
+CHROME_PN = 'chromeos-chrome'
+CHROME_CP = '%s/%s' % (CHROME_CN, CHROME_PN)
+
+# Other packages to uprev while uprevving Chrome.
+OTHER_CHROME_PACKAGES = ['chromeos-base/chromium-source',
+                         'chromeos-base/chrome-icu']
+
+# Chrome use flags
+USE_CHROME_INTERNAL = 'chrome_internal'
+USE_AFDO_USE = 'afdo_use'
+
+
+# Builds and validates _alpha ebuilds.  These builds sync to the latest
+# revision of the Chromium src tree and build with that checkout.
+CHROME_REV_TOT = 'tot'
+
+# Builds and validates chrome at a given revision through cbuildbot
+# --chrome_version
+CHROME_REV_SPEC = 'spec'
+
+# Builds and validates the latest Chromium release as defined by
+# ~/trunk/releases in the Chrome src tree.  These ebuilds are suffixed with rc.
+CHROME_REV_LATEST = 'latest_release'
+
+# Builds and validates the latest Chromium release for a specific Chromium
+# branch that we want to watch.  These ebuilds are suffixed with rc.
+CHROME_REV_STICKY = 'stable_release'
+
+# Builds and validates Chromium for a pre-populated directory.
+# Also uses _alpha, since portage doesn't have anything lower.
+CHROME_REV_LOCAL = 'local'
+VALID_CHROME_REVISIONS = [CHROME_REV_TOT, CHROME_REV_LATEST,
+                          CHROME_REV_STICKY, CHROME_REV_LOCAL, CHROME_REV_SPEC]
+
+
+# Constants for uprevving Android.
+
+# Portage package name for Android container.
+ANDROID_PACKAGE_NAME = 'android-container'
+
+# Builds and validates the latest Android release.
+ANDROID_REV_LATEST = 'latest_release'
+VALID_ANDROID_REVISIONS = [ANDROID_REV_LATEST]
+
+# Build types supported.
+
+# TODO(sosa): Deprecate PFQ type.
+# Incremental builds that are built using binary packages when available.
+# These builds have less validation than other build types.
+INCREMENTAL_TYPE = 'binary'
+
+# These builds serve as PFQ builders.  This is being deprecated.
+PFQ_TYPE = 'pfq'
+
+# Android PFQ type.  Builds and validates new versions of Android.
+ANDROID_PFQ_TYPE = 'android'
+
+# Builds from source and is non-incremental.  These builds fully wipe their
+# chroot before the start of every build and do not use a BINHOST.
+FULL_TYPE = 'full'
+
+# Full but with versioned logic.
+CANARY_TYPE = 'canary'
+
+# Generate payloads for an already built build/version.
+PAYLOADS_TYPE = 'payloads'
+
+# Similar behavior to canary, but used to validate toolchain changes.
+TOOLCHAIN_TYPE = 'toolchain'
+
+# Generic type of tryjob only build configs.
+TRYJOB_TYPE = 'tryjob'
+
+# Special build type for Chroot builders.  These builds focus on building
+# toolchains and validate that they work.
+CHROOT_BUILDER_TYPE = 'chroot'
+CHROOT_BUILDER_BOARD = 'amd64-host'
+
+# Used for builds that don't require a type.
+GENERIC_TYPE = 'generic'
+
+VALID_BUILD_TYPES = (
+    INCREMENTAL_TYPE,
+    FULL_TYPE,
+    CANARY_TYPE,
+    CHROOT_BUILDER_TYPE,
+    CHROOT_BUILDER_BOARD,
+    ANDROID_PFQ_TYPE,
+    PFQ_TYPE,
+    PAYLOADS_TYPE,
+    TOOLCHAIN_TYPE,
+    TRYJOB_TYPE,
+    GENERIC_TYPE,
+)
+
+HWTEST_TRYBOT_NUM = 3
+HWTEST_QUOTA_POOL = 'quota'
+
+HWTEST_QUOTA_ACCOUNT_BVT = 'legacypool-bvt'
+HWTEST_QUOTA_ACCOUNT_BVT_SYNC = 'bvt-sync'
+HWTEST_QUOTA_ACCOUNT_PFQ = 'pfq'
+HWTEST_QUOTA_ACCOUNT_SUITES = 'legacypool-suites'
+HWTEST_QUOTA_ACCOUNT_TOOLCHAIN = 'toolchain'
+
+# How many total test retries should be done for a suite.
+HWTEST_MAX_RETRIES = 5
+
+# Defines for the various hardware test suites:
+#   BVT:  Basic blocking suite to be run against any build that
+#       requires a HWTest phase.
+#   COMMIT:  Suite of basic tests required for commits to the source
+#       tree.  Runs as a blocking suite on the CQ and PFQ; runs as
+#       a non-blocking suite on canaries.
+#   CANARY:  Non-blocking suite run only against the canaries.
+#   AFDO:  Non-blocking suite run only on AFDO builders.
+#   MOBLAB: Blocking suite run only on *_moblab builders.
+#   INSTALLER: Blocking suite run against all canaries; tests basic installer
+#              functionality.
+HWTEST_ARC_COMMIT_SUITE = 'bvt-arc'
+HWTEST_BVT_SUITE = 'bvt-inline'
+HWTEST_COMMIT_SUITE = 'bvt-cq'
+HWTEST_CANARY_SUITE = 'bvt-perbuild'
+HWTEST_INSTALLER_SUITE = 'bvt-installer'
+# Runs all non-informational Tast tests (exercising any of OS, Chrome, and ARC).
+HWTEST_TAST_CQ_SUITE = 'bvt-tast-cq'
+# Runs non-informational Tast tests exercising either Chrome or ARC.
+HWTEST_TAST_CHROME_PFQ_SUITE = 'bvt-tast-chrome-pfq'
+# Runs non-informational Tast tests exercising ARC.
+HWTEST_TAST_ANDROID_PFQ_SUITE = 'bvt-tast-android-pfq'
+# Runs all Tast informational tests.
+HWTEST_TAST_INFORMATIONAL_SUITE = 'bvt-tast-informational'
+HWTEST_AFDO_SUITE = 'AFDO_record'
+HWTEST_JETSTREAM_COMMIT_SUITE = 'jetstream_cq'
+HWTEST_MOBLAB_SUITE = 'moblab'
+HWTEST_MOBLAB_QUICK_SUITE = 'moblab_quick'
+HWTEST_SANITY_SUITE = 'sanity'
+HWTEST_TOOLCHAIN_SUITE = 'toolchain-tests'
+# Non-blocking informational hardware tests for Chrome, run throughout the
+# day on tip-of-trunk Chrome rather than on the daily Chrome branch.
+HWTEST_CHROME_INFORMATIONAL = 'chrome-informational'
+
+# Additional timeout to wait for autotest to abort a suite if the test takes
+# too long to run. This is meant to be overly conservative as a timeout may
+# indicate that autotest is at capacity.
+HWTEST_TIMEOUT_EXTENSION = 10 * 60
+
+HWTEST_WEEKLY_PRIORITY = 'Weekly'
+HWTEST_CTS_PRIORITY = 'CTS'
+HWTEST_GTS_PRIORITY = HWTEST_CTS_PRIORITY
+HWTEST_DAILY_PRIORITY = 'Daily'
+HWTEST_DEFAULT_PRIORITY = 'DEFAULT'
+HWTEST_CQ_PRIORITY = 'CQ'
+HWTEST_BUILD_PRIORITY = 'Build'
+HWTEST_PFQ_PRIORITY = 'PFQ'
+HWTEST_POST_BUILD_PRIORITY = 'PostBuild'
+
+# Ordered by priority (first item being lowest).
+HWTEST_VALID_PRIORITIES = [HWTEST_WEEKLY_PRIORITY,
+                           HWTEST_CTS_PRIORITY,
+                           HWTEST_DAILY_PRIORITY,
+                           HWTEST_POST_BUILD_PRIORITY,
+                           HWTEST_DEFAULT_PRIORITY,
+                           HWTEST_BUILD_PRIORITY,
+                           HWTEST_PFQ_PRIORITY,
+                           HWTEST_CQ_PRIORITY]
+
+# Creates a mapping of priorities to make comparisons easy.
+# Use the same priorities mapping as autotest/client/common_lib/priorities.py
+HWTEST_PRIORITIES_MAP = {
+    HWTEST_WEEKLY_PRIORITY: 10,
+    HWTEST_CTS_PRIORITY: 11,
+    HWTEST_DAILY_PRIORITY: 20,
+    HWTEST_POST_BUILD_PRIORITY: 30,
+    HWTEST_DEFAULT_PRIORITY: 40,
+    HWTEST_BUILD_PRIORITY: 50,
+    HWTEST_PFQ_PRIORITY: 60,
+    HWTEST_CQ_PRIORITY: 70}
+
+# Creates a mapping of priorities for skylab hwtest tasks. In swarming,
+# a lower number means a higher priority. Priorities lower than 48 are reserved
+# for special tasks. The upper bound of priority is 255.
+# Use the same priorities mapping as autotest/venv/skylab_suite/swarming_lib.py
+SKYLAB_HWTEST_PRIORITIES_MAP = {
+    HWTEST_WEEKLY_PRIORITY: 230,
+    HWTEST_CTS_PRIORITY: 215,
+    HWTEST_DAILY_PRIORITY: 200,
+    HWTEST_POST_BUILD_PRIORITY: 170,
+    HWTEST_DEFAULT_PRIORITY: 140,
+    HWTEST_BUILD_PRIORITY: 110,
+    HWTEST_PFQ_PRIORITY: 80,
+    HWTEST_CQ_PRIORITY: 50,
+}
+
+# The environment for executing tests.
+ENV_SKYLAB = 'skylab'
+ENV_AUTOTEST = 'autotest'
+
+# The cipd package for skylab tool
+CIPD_SKYLAB_PACKAGE = 'chromiumos/infra/skylab/linux-amd64'
+# crbug.com/1108489: The skylab tool CIPD package is pinned to a specific
+# version to avoid uncontrolled tool release and so that the tool is effectively
+# branched with cbuildbot.
+CIPD_SKYLAB_INSTANCE_ID = 'LU2Xmdk1oXyZPuiEfzDQhUWFMXY3jYQNPOzHRkRkZBEC'
+
+# HWTest result statuses
+HWTEST_STATUS_PASS = 'pass'
+HWTEST_STATUS_FAIL = 'fail'
+HWTEST_STATUS_ABORT = 'abort'
+HWTEST_STATUS_OTHER = 'other'
+HWTEST_STATUES_NOT_PASSED = frozenset([HWTEST_STATUS_FAIL,
+                                       HWTEST_STATUS_ABORT,
+                                       HWTEST_STATUS_OTHER])
+
+# Define HWTEST subsystem logic constants.
+SUBSYSTEMS = 'subsystems'
+SUBSYSTEM_UNUSED = 'subsystem_unused'
+
+# Build messages
+MESSAGE_TYPE_IGNORED_REASON = 'ignored_reason'
+MESSAGE_TYPE_ANNOTATIONS_FINALIZED = 'annotations_finalized'
+# MESSAGE_TYPE_IGNORED_REASON messages store the affected build as
+# the CIDB column message_value.
+MESSAGE_SUBTYPE_SELF_DESTRUCTION = 'self_destruction'
+
+# Define HWTEST job_keyvals
+JOB_KEYVAL_DATASTORE_PARENT_KEY = 'datastore_parent_key'
+JOB_KEYVAL_CIDB_BUILD_ID = 'cidb_build_id'
+JOB_KEYVAL_CIDB_BUILD_STAGE_ID = 'cidb_build_stage_id'
+JOB_KEYVAL_BUILD_CONFIG = 'build_config'
+JOB_KEYVAL_MASTER_BUILD_CONFIG = 'master_build_config'
+JOB_KEYVAL_BRANCH = 'branch'
+
+
+# How many total test retries should be done for a VM test suite.
+VM_TEST_MAX_RETRIES = 5
+# Defines VM Test types.
+SIMPLE_AU_TEST_TYPE = 'pfq_suite'
+VM_SUITE_TEST_TYPE = 'vm_suite'
+GCE_SUITE_TEST_TYPE = 'gce_suite'
+CROS_VM_TEST_TYPE = 'cros_vm_test'
+DEV_MODE_TEST_TYPE = 'dev_mode_test'
+VALID_VM_TEST_TYPES = [
+    SIMPLE_AU_TEST_TYPE,
+    VM_SUITE_TEST_TYPE,
+    GCE_SUITE_TEST_TYPE,
+    CROS_VM_TEST_TYPE,
+    DEV_MODE_TEST_TYPE
+]
+VALID_GCE_TEST_SUITES = ['gce-smoke', 'gce-sanity']
+# MoblabVM tests are suites of tests used to validate a moblab image via
+# VMTests.
+MOBLAB_VM_SMOKE_TEST_TYPE = 'moblab_smoke_test'
+
+CHROMIUMOS_OVERLAY_DIR = 'src/third_party/chromiumos-overlay'
+PORTAGE_STABLE_OVERLAY_DIR = 'src/third_party/portage-stable'
+ECLASS_OVERLAY_DIR = 'src/third_party/eclass-overlay'
+CHROMEOS_PARTNER_OVERLAY_DIR = 'src/private-overlays/chromeos-partner-overlay/'
+PUBLIC_BINHOST_CONF_DIR = os.path.join(CHROMIUMOS_OVERLAY_DIR,
+                                       'chromeos/binhost')
+PRIVATE_BINHOST_CONF_DIR = os.path.join(CHROMEOS_PARTNER_OVERLAY_DIR,
+                                        'chromeos/binhost')
+
+VERSION_FILE = os.path.join(CHROMIUMOS_OVERLAY_DIR,
+                            'chromeos/config/chromeos_version.sh')
+SDK_VERSION_FILE = os.path.join(PUBLIC_BINHOST_CONF_DIR,
+                                'host/sdk_version.conf')
+SDK_GS_BUCKET = 'chromiumos-sdk'
+
+PUBLIC = 'public'
+PRIVATE = 'private'
+
+BOTH_OVERLAYS = 'both'
+PUBLIC_OVERLAYS = PUBLIC
+PRIVATE_OVERLAYS = PRIVATE
+VALID_OVERLAYS = [BOTH_OVERLAYS, PUBLIC_OVERLAYS, PRIVATE_OVERLAYS, None]
+
+# Common default logging settings for use with the logging module.
+LOGGER_FMT = '%(asctime)s: %(levelname)s: %(message)s'
+LOGGER_DATE_FMT = '%H:%M:%S'
+
+# Used by remote patch serialization/deserialization.
+INTERNAL_PATCH_TAG = 'i'
+EXTERNAL_PATCH_TAG = 'e'
+PATCH_TAGS = (INTERNAL_PATCH_TAG, EXTERNAL_PATCH_TAG)
+
+GERRIT_ON_BORG_LABELS = {
+    'Code-Review': 'CRVW',
+    'Commit-Queue': 'COMR',
+    'Verified': 'VRIF',
+}
+
+# Environment variables that should be exposed to all children processes
+# invoked via cros_build_lib.run.
+ENV_PASSTHRU = ('CROS_SUDO_KEEP_ALIVE', SHARED_CACHE_ENVVAR,
+                PARALLEL_EMERGE_STATUS_FILE_ENVVAR)
+
+# List of variables to proxy into the chroot from the host, and to
+# have sudo export if present. Any time this list is modified, a new
+# chroot_version_hooks.d upgrade script that symlinks to 153_rewrite_sudoers.d
+# should be created.
+CHROOT_ENVIRONMENT_WHITELIST = (
+    'CHROMEOS_OFFICIAL',
+    'CHROMEOS_VERSION_AUSERVER',
+    'CHROMEOS_VERSION_DEVSERVER',
+    'CHROMEOS_VERSION_TRACK',
+    'GCC_GITHASH',
+    'GIT_AUTHOR_EMAIL',
+    'GIT_AUTHOR_NAME',
+    'GIT_COMMITTER_EMAIL',
+    'GIT_COMMITTER_NAME',
+    'GIT_PROXY_COMMAND',
+    'GIT_SSH',
+    'RSYNC_PROXY',
+    'SSH_AGENT_PID',
+    'SSH_AUTH_SOCK',
+    'TMUX',
+    'USE',
+    'all_proxy',
+    'ftp_proxy',
+    'http_proxy',
+    'https_proxy',
+    'no_proxy',
+)
+
+# Paths for Chrome LKGM which are relative to the Chromium base url.
+CHROME_LKGM_FILE = 'CHROMEOS_LKGM'
+PATH_TO_CHROME_LKGM = 'chromeos/%s' % CHROME_LKGM_FILE
+# Path for the Chrome LKGM's closest OWNERS file.
+PATH_TO_CHROME_CHROMEOS_OWNERS = 'chromeos/OWNERS'
+
+# Cache constants.
+COMMON_CACHE = 'common'
+
+# Artifact constants.
+def _SlashToUnderscore(string):
+  return string.replace('/', '_')
+
+# GCE tar ball constants.
+def ImageBinToGceTar(image_bin):
+  assert image_bin.endswith('.bin'), ('Filename %s does not end with ".bin"' %
+                                      image_bin)
+  return '%s_gce.tar.gz' % os.path.splitext(image_bin)[0]
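+
+# For example, ImageBinToGceTar('chromiumos_test_image.bin') should yield
+# 'chromiumos_test_image_gce.tar.gz'.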
+
+RELEASE_BUCKET = 'gs://chromeos-releases'
+TRASH_BUCKET = 'gs://chromeos-throw-away-bucket'
+CHROME_SYSROOT_TAR = 'sysroot_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)
+CHROME_ENV_TAR = 'environment_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)
+CHROME_ENV_FILE = 'environment'
+BASE_IMAGE_NAME = 'chromiumos_base_image'
+BASE_IMAGE_TAR = '%s.tar.xz' % BASE_IMAGE_NAME
+BASE_IMAGE_BIN = '%s.bin' % BASE_IMAGE_NAME
+BASE_IMAGE_GCE_TAR = ImageBinToGceTar(BASE_IMAGE_BIN)
+IMAGE_SCRIPTS_NAME = 'image_scripts'
+IMAGE_SCRIPTS_TAR = '%s.tar.xz' % IMAGE_SCRIPTS_NAME
+TARGET_SYSROOT_TAR = 'sysroot_%s.tar.xz' % _SlashToUnderscore(TARGET_OS_PKG)
+VM_IMAGE_NAME = 'chromiumos_qemu_image'
+VM_IMAGE_BIN = '%s.bin' % VM_IMAGE_NAME
+VM_IMAGE_TAR = '%s.tar.xz' % VM_IMAGE_NAME
+VM_DISK_PREFIX = 'chromiumos_qemu_disk.bin'
+VM_MEM_PREFIX = 'chromiumos_qemu_mem.bin'
+VM_NUM_RETRIES = 0
+# Disabling Tast VM retries because of https://crbug.com/1098346.
+TAST_VM_NUM_RETRIES = 0
+TAST_VM_TEST_RESULTS = 'tast_vm_test_results_%(attempt)s'
+BASE_GUEST_VM_DIR = 'guest-vm-base'
+TEST_GUEST_VM_DIR = 'guest-vm-test'
+BASE_GUEST_VM_TAR = '%s.tar.xz' % BASE_GUEST_VM_DIR
+TEST_GUEST_VM_TAR = '%s.tar.xz' % TEST_GUEST_VM_DIR
+
+TEST_IMAGE_NAME = 'chromiumos_test_image'
+TEST_IMAGE_TAR = '%s.tar.xz' % TEST_IMAGE_NAME
+TEST_IMAGE_BIN = '%s.bin' % TEST_IMAGE_NAME
+TEST_IMAGE_GCE_TAR = ImageBinToGceTar(TEST_IMAGE_BIN)
+TEST_KEY_PRIVATE = 'id_rsa'
+TEST_KEY_PUBLIC = 'id_rsa.pub'
+
+DEV_IMAGE_NAME = 'chromiumos_image'
+DEV_IMAGE_BIN = '%s.bin' % DEV_IMAGE_NAME
+
+RECOVERY_IMAGE_NAME = 'recovery_image'
+RECOVERY_IMAGE_BIN = '%s.bin' % RECOVERY_IMAGE_NAME
+RECOVERY_IMAGE_TAR = '%s.tar.xz' % RECOVERY_IMAGE_NAME
+
+# Image type constants.
+IMAGE_TYPE_BASE = 'base'
+IMAGE_TYPE_DEV = 'dev'
+IMAGE_TYPE_TEST = 'test'
+IMAGE_TYPE_RECOVERY = 'recovery'
+IMAGE_TYPE_FACTORY = 'factory'
+IMAGE_TYPE_FIRMWARE = 'firmware'
+# USB PD accessory microcontroller firmware (e.g. power brick, display dongle).
+IMAGE_TYPE_ACCESSORY_USBPD = 'accessory_usbpd'
+# Standalone accessory microcontroller firmware (e.g. wireless keyboard).
+IMAGE_TYPE_ACCESSORY_RWSIG = 'accessory_rwsig'
+# Cr50 Firmware.
+IMAGE_TYPE_CR50_FIRMWARE = 'cr50_firmware'
+
+IMAGE_TYPE_TO_NAME = {
+    IMAGE_TYPE_BASE: BASE_IMAGE_BIN,
+    IMAGE_TYPE_DEV: DEV_IMAGE_BIN,
+    IMAGE_TYPE_RECOVERY: RECOVERY_IMAGE_BIN,
+    IMAGE_TYPE_TEST: TEST_IMAGE_BIN,
+}
+IMAGE_NAME_TO_TYPE = dict((v, k) for k, v in IMAGE_TYPE_TO_NAME.items())
+
+METADATA_JSON = 'metadata.json'
+PARTIAL_METADATA_JSON = 'partial-metadata.json'
+METADATA_TAGS = 'tags'
+DELTA_SYSROOT_TAR = 'delta_sysroot.tar.xz'
+DELTA_SYSROOT_BATCH = 'batch'
+
+FIRMWARE_ARCHIVE_NAME = 'firmware_from_source.tar.bz2'
+
+# Global configuration constants.
+CHROMITE_CONFIG_DIR = os.path.expanduser('~/.chromite')
+CHROME_SDK_BASHRC = os.path.join(CHROMITE_CONFIG_DIR, 'chrome_sdk.bashrc')
+SYNC_RETRIES = 4
+SLEEP_TIMEOUT = 30
+
+# Lab status url.
+LAB_STATUS_URL = 'http://chromiumos-lab.appspot.com/current?format=json'
+
+GOLO_SMTP_SERVER = 'mail.golo.chromium.org'
+
+CHROME_GARDENER = 'chrome'
+# Email alias to add as reviewer in Gerrit, which GWSQ will then automatically
+# assign to the current gardener.
+CHROME_GARDENER_REVIEW_EMAIL = '[email protected]'
+
+# Useful config targets.
+CANARY_MASTER = 'master-release'
+PFQ_MASTER = 'master-chromium-pfq'
+VMMST_ANDROID_PFQ_MASTER = 'master-vmmst-android-pfq'
+PI_ANDROID_PFQ_MASTER = 'master-pi-android-pfq'
+VMRVC_ANDROID_PFQ_MASTER = 'master-vmrvc-android-pfq'
+TOOLCHAIN_MASTTER = 'master-toolchain'
+
+
+# Email validation regex. Not quite fully compliant with RFC 2822, but a good
+# approximation.
+EMAIL_REGEX = r'[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}'
+
+# Blacklist of files not allowed to be uploaded into the Partner Project Google
+# Storage Buckets:
+# debug.tgz contains debug symbols.
+# manifest.xml exposes all of our repo names.
+# vm_test_results can contain symbolicated crash dumps.
+EXTRA_BUCKETS_FILES_BLACKLIST = [
+    'debug.tgz',
+    'manifest.xml',
+    'vm_test_results_*'
+]
+
+# AFDO common constants.
+# How long the AFDO_record autotest has to generate the AFDO perf data.
+AFDO_GENERATE_TIMEOUT = 120 * 60
+
+# Gmail Credentials.
+GMAIL_TOKEN_CACHE_FILE = os.path.expanduser('~/.gmail_credentials')
+GMAIL_TOKEN_JSON_FILE = '/creds/refresh_tokens/chromeos_gmail_alerts'
+
+# Maximum number of boards per release group builder. This should be
+# chosen/adjusted based on expected release build times such that successive
+# builds don't overlap and create a backlog.
+MAX_RELEASE_GROUP_BOARDS = 4
+
+CHROMEOS_SERVICE_ACCOUNT = os.path.join('/', 'creds', 'service_accounts',
+                                        'service-account-chromeos.json')
+
+# Buildbucket buckets
+CHROMEOS_RELEASE_BUILDBUCKET_BUCKET = 'master.chromeos_release'
+CHROMEOS_BUILDBUCKET_BUCKET = 'master.chromeos'
+INTERNAL_SWARMING_BUILDBUCKET_BUCKET = 'luci.chromeos.general'
+
+ACTIVE_BUCKETS = [
+    CHROMEOS_RELEASE_BUILDBUCKET_BUCKET,
+    CHROMEOS_BUILDBUCKET_BUCKET,
+    INTERNAL_SWARMING_BUILDBUCKET_BUCKET,
+]
+
+# Build retry limit on buildbucket
+#
+# 2020-05-13 by engeg@: This is rarely effective, causes confusion and
+# higher bot utilization, and if the initial try got past uploading artifacts,
+# then the retry is destined to fail with a difficult-to-parse error.
+# 2020-05-19 by seanabraham@: Leave this at zero. These retries can break
+# Chrome-wide profiling. http://b/156994019
+BUILDBUCKET_BUILD_RETRY_LIMIT = 0  # Do not change. Read the above.
+
+# TODO(nxia): consolidate all run.metadata key constants,
+# add a unit test to avoid duplicated keys in run_metadata
+
+# Builder_run metadata keys
+METADATA_SCHEDULED_IMPORTANT_SLAVES = 'scheduled_important_slaves'
+METADATA_SCHEDULED_EXPERIMENTAL_SLAVES = 'scheduled_experimental_slaves'
+METADATA_UNSCHEDULED_SLAVES = 'unscheduled_slaves'
+# List of builders marked as experimental through the tree status, not all the
+# experimental builders for a run.
+METADATA_EXPERIMENTAL_BUILDERS = 'experimental_builders'
+
+# Metadata key to indicate whether a build is self-destructed.
+SELF_DESTRUCTED_BUILD = 'self_destructed_build'
+
+# Metadata key to indicate whether a build is self-destructed with success.
+SELF_DESTRUCTED_WITH_SUCCESS_BUILD = 'self_destructed_with_success_build'
+
+# Chroot snapshot names
+CHROOT_SNAPSHOT_CLEAN = 'clean-chroot'
+
+# Partition labels.
+PART_STATE = 'STATE'
+PART_ROOT_A = 'ROOT-A'
+PART_ROOT_B = 'ROOT-B'
+PART_KERN_A = 'KERN-A'
+PART_KERN_B = 'KERN-B'
+
+# Quick provision payloads. These file names should never be changed, otherwise
+# very bad things can happen :). The reason is we have already uploaded these
+# files with these names for all boards. So if the name changes, all scripts
+# that have been using this need to handle both cases to be backward compatible.
+QUICK_PROVISION_PAYLOAD_KERNEL = 'full_dev_part_KERN.bin.gz'
+QUICK_PROVISION_PAYLOAD_ROOTFS = 'full_dev_part_ROOT.bin.gz'
+
+# Mock build and stage IDs.
+MOCK_STAGE_ID = 313377
+MOCK_BUILD_ID = 31337
+
+# Topology dictionary copied from CIDB.
+TOPOLOGY_DICT = {
+    '/buildbucket/host':
+        'cr-buildbucket.appspot.com',
+    '/chrome_swarming_proxy/host':
+        'chromeos-swarming.appspot.com',
+    '/datastore/creds_file': ('/creds/service_accounts/service-account-chromeos'
+                              '-datastore-writer-prod.json'),
+    '/sheriffomatic/host':
+        'sheriff-o-matic.appspot.com',
+    '/statsd/es_host':
+        '104.154.79.237',
+    '/statsd/host':
+        '104.154.79.237',
+}
+
+# Percentage of child builders that need to complete to update LKGM
+LKGM_THRESHOLD = 80
diff --git a/utils/frozen_chromite/lib/cros_build_lib.py b/utils/frozen_chromite/lib/cros_build_lib.py
new file mode 100644
index 0000000..72c0a42
--- /dev/null
+++ b/utils/frozen_chromite/lib/cros_build_lib.py
@@ -0,0 +1,1827 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common python commands used by various build scripts."""
+
+from __future__ import print_function
+
+import base64
+import contextlib
+from datetime import datetime
+import email.utils
+import errno
+import functools
+import getpass
+import inspect
+import operator
+import os
+import re
+import signal
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+
+import six
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_collections
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import signals
+
+
+STRICT_SUDO = False
+
+# For use by ShellQuote.  Match all characters that the shell might treat
+# specially.  This means a number of things:
+#  - Reserved characters.
+#  - Characters used in expansions (brace, variable, path, globs, etc...).
+#  - Characters that an interactive shell might use (like !).
+#  - Whitespace so that one arg turns into multiple.
+# See the bash man page as well as the POSIX shell documentation for more info:
+#   http://www.gnu.org/software/bash/manual/bashref.html
+#   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html
+_SHELL_QUOTABLE_CHARS = frozenset('[|&;()<> \t!{}[]=*?~$"\'\\#^')
+# The chars that, when used inside of double quotes, need escaping.
+# Order here matters as we need to escape backslashes first.
+_SHELL_ESCAPE_CHARS = r'\"`$'
+
+# If the number of files is larger than this, we will use the -T option
+# and the files to be added may not show up on the command line.
+_THRESHOLD_TO_USE_T_FOR_TAR = 50
+
+
+def ShellQuote(s):
+  """Quote |s| in a way that is safe for use in a shell.
+
+  We aim to be safe, but also to produce "nice" output.  That means we don't
+  use quotes when we don't need to, and we prefer to use fewer quotes (like
+  putting it all in single quotes) than more (using double quotes and escaping
+  a bunch of stuff, or mixing the quotes).
+
+  While Python does provide a number of alternatives like:
+   - pipes.quote
+   - shlex.quote
+  They suffer from various problems like:
+   - Not widely available in different python versions.
+   - Do not produce pretty output in many cases.
+   - Are in modules that rarely otherwise get used.
+
+  Note: We don't handle reserved shell words like "for" or "case".  This is
+  because those only matter when they're the first element in a command, and
+  there is no use case for that.  When we want to run commands, we tend to
+  run real programs and not shell ones.
+
+  Args:
+    s: The string to quote.
+
+  Returns:
+    A shell-safe (possibly quoted) string.
+  """
+  if sys.version_info.major < 3:
+    # This is a bit of a hack.  Python 2 will display strings with u prefixes
+    # when logging which makes things harder to work with.  Writing bytes to
+    # stdout will be interpreted as UTF-8 content implicitly.
+    if isinstance(s, six.string_types):
+      try:
+        s = s.encode('utf-8')
+      except UnicodeDecodeError:
+        # We tried our best.  Let Python's automatic mixed encoding kick in.
+        pass
+    else:
+      return repr(s)
+  else:
+    # If callers pass down bad types, don't blow up.
+    if isinstance(s, six.binary_type):
+      s = s.decode('utf-8', 'backslashreplace')
+    elif not isinstance(s, six.string_types):
+      return repr(s)
+
+  # See if no quoting is needed so we can return the string as-is.
+  for c in s:
+    if c in _SHELL_QUOTABLE_CHARS:
+      break
+  else:
+    if not s:
+      return "''"
+    else:
+      return s
+
+  # See if we can use single quotes first.  Output is nicer.
+  if "'" not in s:
+    return "'%s'" % s
+
+  # Have to use double quotes.  Escape the few chars that still expand when
+  # used inside of double quotes.
+  for c in _SHELL_ESCAPE_CHARS:
+    if c in s:
+      s = s.replace(c, r'\%s' % c)
+  return '"%s"' % s
+
+
+def TruncateStringToLine(s, maxlen=80):
+  """Truncate |s| to a maximum length of |maxlen| including elipsis (...)
+
+  Args:
+    s: A string.
+    maxlen: Maximum length of desired returned string. Must be at least 3.
+
+  Returns:
+    s if len(s) <= maxlen already and s has no newline in it.
+    Otherwise, a single line truncation that ends with '...' and is of
+    length |maxlen|.
+  """
+  assert maxlen >= 3
+  line = s.splitlines()[0]
+  if len(line) <= maxlen:
+    return line
+  else:
+    return line[:maxlen-3] + '...'
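+
+# Illustrative behavior, assumed from the slicing above:
+#   TruncateStringToLine('x' * 100, maxlen=10) returns 'xxxxxxx...' (exactly
+#   10 characters), and multi-line input keeps only its first line.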
+
+
+def ShellUnquote(s):
+  """Do the opposite of ShellQuote.
+
+  This function assumes that the input is a valid escaped string. The behaviour
+  is undefined on malformed strings.
+
+  Args:
+    s: An escaped string.
+
+  Returns:
+    The unescaped version of the string.
+  """
+  if not s:
+    return ''
+
+  if s[0] == "'":
+    return s[1:-1]
+
+  if s[0] != '"':
+    return s
+
+  s = s[1:-1]
+  output = ''
+  i = 0
+  while i < len(s) - 1:
+    # Skip the backslash when it makes sense.
+    if s[i] == '\\' and s[i + 1] in _SHELL_ESCAPE_CHARS:
+      i += 1
+    output += s[i]
+    i += 1
+  return output + s[i] if i < len(s) else output
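+
+# A sketch of the expected round-trip, assumed from the two functions above:
+#   ShellUnquote(ShellQuote('a b')) -> 'a b'
+#   ShellUnquote('abc')             -> 'abc'  (unquoted input passes through)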
+
+
+def CmdToStr(cmd):
+  """Translate a command list into a space-separated string.
+
+  The resulting string should be suitable for logging messages and for
+  pasting into a terminal to run.  Command arguments are surrounded by
+  quotes to keep them grouped, even if an argument has spaces in it.
+
+  Examples:
+    ['a', 'b'] ==> "'a' 'b'"
+    ['a b', 'c'] ==> "'a b' 'c'"
+    ['a', 'b\'c'] ==> '\'a\' "b\'c"'
+    [u'a', "/'$b"] ==> '\'a\' "/\'$b"'
+    [] ==> ''
+    See unittest for additional (tested) examples.
+
+  Args:
+    cmd: List of command arguments.
+
+  Returns:
+    String representing full command.
+  """
+  # If callers pass down bad types, triage it a bit.
+  if isinstance(cmd, (list, tuple)):
+    return ' '.join(ShellQuote(arg) for arg in cmd)
+  else:
+    raise ValueError('cmd must be list or tuple, not %s: %r' %
+                     (type(cmd), repr(cmd)))
+
+
+class CompletedProcess(getattr(subprocess, 'CompletedProcess', object)):
+  """An object to store various attributes of a child process.
+
+  This is akin to subprocess.CompletedProcess.
+  """
+
+  # The linter is confused by the getattr usage above.
+  # TODO(vapier): Drop this once we're Python 3-only and we drop getattr.
+  # pylint: disable=bad-option-value,super-on-old-class
+  def __init__(self, args=None, returncode=None, stdout=None, stderr=None):
+    if sys.version_info.major < 3:
+      self.args = args
+      self.stdout = stdout
+      self.stderr = stderr
+      self.returncode = returncode
+    else:
+      super(CompletedProcess, self).__init__(
+          args=args, returncode=returncode, stdout=stdout, stderr=stderr)
+
+  @property
+  def cmd(self):
+    """Alias to self.args to better match other subprocess APIs."""
+    return self.args
+
+  @property
+  def cmdstr(self):
+    """Return self.cmd as a well shell-quoted string useful for log messages."""
+    if self.args is None:
+      return ''
+    else:
+      return CmdToStr(self.args)
+
+  def check_returncode(self):
+    """Raise CalledProcessError if the exit code is non-zero."""
+    if self.returncode:
+      raise CalledProcessError(
+          returncode=self.returncode, cmd=self.args, stdout=self.stdout,
+          stderr=self.stderr, msg='check_returncode failed')
+
+
+# TODO(crbug.com/1006587): Migrate users to CompletedProcess and drop this.
+class CommandResult(CompletedProcess):
+  """An object to store various attributes of a child process.
+
+  This is akin to subprocess.CompletedProcess.
+  """
+
+  # The linter is confused by the getattr usage above.
+  # TODO(vapier): Drop this once we're Python 3-only and we drop getattr.
+  # pylint: disable=bad-option-value,super-on-old-class
+  def __init__(self, cmd=None, error=None, output=None, returncode=None,
+               args=None, stdout=None, stderr=None):
+    if args is None:
+      args = cmd
+    elif cmd is not None:
+      raise TypeError('Only specify |args|, not |cmd|')
+    if stdout is None:
+      stdout = output
+    elif output is not None:
+      raise TypeError('Only specify |stdout|, not |output|')
+    if stderr is None:
+      stderr = error
+    elif error is not None:
+      raise TypeError('Only specify |stderr|, not |error|')
+
+    super(CommandResult, self).__init__(args=args, stdout=stdout, stderr=stderr,
+                                        returncode=returncode)
+
+  @property
+  def output(self):
+    """Backwards compat API."""
+    return self.stdout
+
+  @property
+  def error(self):
+    """Backwards compat API."""
+    return self.stderr
+
+
+class CalledProcessError(subprocess.CalledProcessError):
+  """Error caught in run() function.
+
+  This is akin to subprocess.CalledProcessError.  We do not support |output|,
+  only |stdout|.
+
+  Attributes:
+    returncode: The exit code of the process.
+    cmd: The command that triggered this exception.
+    msg: Short explanation of the error.
+    exception: The underlying Exception if available.
+  """
+
+  def __init__(self, returncode, cmd, stdout=None, stderr=None, msg=None,
+               exception=None):
+    if exception is not None and not isinstance(exception, Exception):
+      raise TypeError('exception must be an exception instance; got %r'
+                      % (exception,))
+
+    super(CalledProcessError, self).__init__(returncode, cmd, stdout)
+    # The parent class will set |output|, so delete it.
+    del self.output
+    # TODO(vapier): When we're Python 3-only, delete this assignment as the
+    # parent handles it for us.
+    self.stdout = stdout
+    # TODO(vapier): When we're Python 3-only, move stderr to the init above.
+    self.stderr = stderr
+    self.msg = msg
+    self.exception = exception
+
+  @property
+  def cmdstr(self):
+    """Return self.cmd as a well shell-quoted string useful for log messages."""
+    if self.cmd is None:
+      return ''
+    else:
+      return CmdToStr(self.cmd)
+
+  def Stringify(self, stdout=True, stderr=True):
+    """Custom method for controlling what is included in stringifying this.
+
+    Args:
+      stdout: Whether to include captured stdout in the return value.
+      stderr: Whether to include captured stderr in the return value.
+
+    Returns:
+      A summary string for this result.
+    """
+    items = [
+        u'return code: %s; command: %s' % (
+            self.returncode, self.cmdstr),
+    ]
+    if stderr and self.stderr:
+      stderr = self.stderr
+      if isinstance(stderr, six.binary_type):
+        stderr = stderr.decode('utf-8', 'replace')
+      items.append(stderr)
+    if stdout and self.stdout:
+      stdout = self.stdout
+      if isinstance(stdout, six.binary_type):
+        stdout = stdout.decode('utf-8', 'replace')
+      items.append(stdout)
+    if self.msg:
+      msg = self.msg
+      if isinstance(msg, six.binary_type):
+        msg = msg.decode('utf-8', 'replace')
+      items.append(msg)
+    return u'\n'.join(items)
+
+  def __str__(self):
+    if sys.version_info.major < 3:
+      # __str__ needs to return ascii, thus force a conversion to be safe.
+      return self.Stringify().encode('ascii', 'xmlcharrefreplace')
+    else:
+      return self.Stringify()
+
+  def __eq__(self, other):
+    return (isinstance(other, type(self)) and
+            self.returncode == other.returncode and
+            self.cmd == other.cmd and
+            self.stdout == other.stdout and
+            self.stderr == other.stderr and
+            self.msg == other.msg and
+            self.exception == other.exception)
+
+  def __ne__(self, other):
+    return not self.__eq__(other)
+
+
+# TODO(crbug.com/1006587): Migrate users to CompletedProcess and drop this.
+class RunCommandError(CalledProcessError):
+  """Error caught in run() method.
+
+  Attributes:
+    args: Tuple of the attributes below.
+    msg: Short explanation of the error.
+    result: The CommandResult that triggered this error, if available.
+    exception: The underlying Exception if available.
+  """
+
+  def __init__(self, msg, result=None, exception=None):
+    # This makes mocking tests easier.
+    if result is None:
+      result = CommandResult()
+    elif not isinstance(result, CommandResult):
+      raise TypeError('result must be a CommandResult instance; got %r'
+                      % (result,))
+
+    self.args = (msg, result, exception)
+    self.result = result
+    super(RunCommandError, self).__init__(
+        returncode=result.returncode, cmd=result.args, stdout=result.stdout,
+        stderr=result.stderr, msg=msg, exception=exception)
+
+
+class TerminateRunCommandError(RunCommandError):
+  """We were signaled to shutdown while running a command.
+
+  Client code shouldn't generally know, nor care about this class.  It's
+  used internally to suppress retry attempts when we're signaled to die.
+  """
+
+
+def sudo_run(cmd, user='root', preserve_env=False, **kwargs):
+  """Run a command via sudo.
+
+  Client code must use this rather than coming up with their own run
+  invocation that jams sudo in; this function is used to enforce certain
+  rules in our code about sudo usage, and as a potential auditing point.
+
+  Args:
+    cmd: The command to run.  See run for rules of this argument: sudo_run
+         purely prefixes it with sudo.
+    user: The user to run the command as.
+    preserve_env (bool): Whether to preserve the environment.
+    kwargs: See run() options; they are passed through directly to it.
+          Note that this supports a 'strict' keyword that defaults to True.
+          If set to False, it'll suppress strict sudo behavior.
+
+  Returns:
+    See run documentation.
+
+  Raises:
+    This function may immediately raise RunCommandError if we're operating
+    in a strict sudo context and the API is being misused.
+    Barring that, see run's documentation: it can raise the same things run
+    does.
+  """
+  sudo_cmd = ['sudo']
+
+  strict = kwargs.pop('strict', True)
+
+  if user == 'root' and os.geteuid() == 0:
+    return run(cmd, **kwargs)
+
+  if strict and STRICT_SUDO:
+    if 'CROS_SUDO_KEEP_ALIVE' not in os.environ:
+      raise RunCommandError(
+          'We were invoked in a strict sudo non-interactive context, but no '
+          'sudo keep alive daemon is running.  This is a bug in the code.',
+          CommandResult(args=cmd, returncode=126))
+    sudo_cmd += ['-n']
+
+  if user != 'root':
+    sudo_cmd += ['-u', user]
+
+  if preserve_env:
+    sudo_cmd += ['--preserve-env']
+
+  # Pass these values down into the sudo environment, since sudo will
+  # just strip them normally.
+  extra_env = kwargs.pop('extra_env', None)
+  extra_env = {} if extra_env is None else extra_env.copy()
+
+  for var in constants.ENV_PASSTHRU:
+    if var not in extra_env and var in os.environ:
+      extra_env[var] = os.environ[var]
+
+  sudo_cmd.extend('%s=%s' % (k, v) for k, v in extra_env.items())
+
+  # Finally, block people from passing options to sudo.
+  sudo_cmd.append('--')
+
+  if isinstance(cmd, six.string_types):
+    # We need to handle shell ourselves so the order is correct:
+    #  $ sudo [sudo args] -- bash -c '[shell command]'
+    # If we let run take care of it, we'd end up with:
+    #  $ bash -c 'sudo [sudo args] -- [shell command]'
+    shell = kwargs.pop('shell', False)
+    if not shell:
+      raise Exception('Cannot run a string command without a shell')
+    sudo_cmd.extend(['/bin/bash', '-c', cmd])
+  else:
+    sudo_cmd.extend(cmd)
+
+  return run(sudo_cmd, **kwargs)
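+
+# A rough sketch of how sudo_run composes its command (hypothetical call; any
+# constants.ENV_PASSTHRU vars present in the environment are appended as well):
+#   sudo_run(['whoami'], user='nobody', extra_env={'USE': 'chrome_internal'})
+# runs approximately: sudo -u nobody USE=chrome_internal -- whoami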
+
+
+def _KillChildProcess(proc, int_timeout, kill_timeout, cmd, original_handler,
+                      signum, frame):
+  """Used as a signal handler by run.
+
+  This is internal to run.  No other code should use this.
+  """
+  if signum:
+    # If we've been invoked because of a signal, ignore delivery of that signal
+    # from this point forward.  The invoking context of _KillChildProcess
+    # restores signal delivery to what it was prior; we suppress future delivery
+    # till then since this code handles SIGINT/SIGTERM fully including
+    # delivering the signal to the original handler on the way out.
+    signal.signal(signum, signal.SIG_IGN)
+
+  # Do not trust Popen's returncode alone; we can be invoked from contexts where
+  # the Popen instance was created, but no process was generated.
+  if proc.returncode is None and proc.pid is not None:
+    try:
+      while proc.poll_lock_breaker() is None and int_timeout >= 0:
+        time.sleep(0.1)
+        int_timeout -= 0.1
+
+      proc.terminate()
+      while proc.poll_lock_breaker() is None and kill_timeout >= 0:
+        time.sleep(0.1)
+        kill_timeout -= 0.1
+
+      if proc.poll_lock_breaker() is None:
+        # Still doesn't want to die.  Too bad, so sad, time to die.
+        proc.kill()
+    except EnvironmentError as e:
+      logging.warning('Ignoring unhandled exception in _KillChildProcess: %s',
+                      e)
+
+    # Ensure our child process has been reaped.
+    kwargs = {}
+    if sys.version_info.major >= 3:
+      # ... but don't wait forever.
+      kwargs['timeout'] = 60
+    proc.wait_lock_breaker(**kwargs)
+
+  if not signals.RelaySignal(original_handler, signum, frame):
+    # Mock up our own, matching exit code for signaling.
+    cmd_result = CommandResult(args=cmd, returncode=signum << 8)
+    raise TerminateRunCommandError('Received signal %i' % signum, cmd_result)
+
+
+class _Popen(subprocess.Popen):
+  """subprocess.Popen derivative customized for our usage.
+
+  Specifically, we fix terminate/send_signal/kill to work if the child process
+  was a setuid binary; on vanilla kernels, the parent can wax the child
+  regardless, on goobuntu this apparently isn't allowed, thus we fall back
+  to the sudo machinery we have.
+
+  While we're overriding send_signal, we also suppress ESRCH being raised
+  if the process has exited, and suppress signaling all together if the process
+  has knowingly been waitpid'd already.
+  """
+
+  # Pylint seems to be buggy with the send_signal signature detection.
+  # pylint: disable=arguments-differ
+  def send_signal(self, sig):
+    if self.returncode is not None:
+      # The original implementation in Popen would allow signaling whatever
+      # process now occupies this pid, even if the Popen object had waitpid'd.
+      # Since we can escalate to sudo kill, we do not want to allow that.
+      # Fixing this addresses that angle, and makes the API less sucky in the
+      # process.
+      return
+
+    try:
+      os.kill(self.pid, sig)
+    except EnvironmentError as e:
+      if e.errno == errno.EPERM:
+        # Kill returns either 0 (signal delivered), or 1 (signal wasn't
+        # delivered).  This isn't particularly informative, but we still
+        # need that info to decide what to do, thus the check=False.
+        ret = sudo_run(['kill', '-%i' % sig, str(self.pid)],
+                       print_cmd=False, stdout=True,
+                       stderr=True, check=False)
+        if ret.returncode == 1:
+          # The kill binary doesn't distinguish between permission denied,
+          # and the pid is missing.  Denied can only occur under weird
+          # grsec/selinux policies.  We ignore that potential and just
+          # assume the pid was already dead and try to reap it.
+          self.poll()
+      elif e.errno == errno.ESRCH:
+        # Since we know the process is dead, reap it now.
+        # Normally Popen would throw this error; we suppress it since frankly
+        # that's a misfeature and we're already overriding this method.
+        self.poll()
+      else:
+        raise
+
+  def _lock_breaker(self, func, *args, **kwargs):
+    """Helper to manage the waitpid lock.
+
+    Workaround https://bugs.python.org/issue25960.
+    """
+    # If the lock doesn't exist, or is not locked, call the func directly.
+    lock = getattr(self, '_waitpid_lock', None)
+    if lock is not None and lock.locked():
+      try:
+        lock.release()
+        return func(*args, **kwargs)
+      finally:
+        if not lock.locked():
+          lock.acquire()
+    else:
+      return func(*args, **kwargs)
+
+  def poll_lock_breaker(self, *args, **kwargs):
+    """Wrapper around poll() to break locks if needed."""
+    return self._lock_breaker(self.poll, *args, **kwargs)
+
+  def wait_lock_breaker(self, *args, **kwargs):
+    """Wrapper around wait() to break locks if needed."""
+    return self._lock_breaker(self.wait, *args, **kwargs)
+
+
+# pylint: disable=redefined-builtin
+def run(cmd, print_cmd=True, stdout=None, stderr=None,
+        cwd=None, input=None, enter_chroot=False,
+        shell=False, env=None, extra_env=None, ignore_sigint=False,
+        chroot_args=None, debug_level=logging.INFO,
+        check=True, int_timeout=1, kill_timeout=1,
+        log_output=False, capture_output=False,
+        quiet=False, encoding=None, errors=None, dryrun=False,
+        **kwargs):
+  """Runs a command.
+
+  Args:
+    cmd: cmd to run.  Should be input to subprocess.Popen. If a string, shell
+      must be true. Otherwise the command must be an array of arguments, and
+      shell must be false.
+    print_cmd: prints the command before running it.
+    stdout: Where to send stdout.  This may be many things to control
+      redirection:
+        * None is the default; the existing stdout is used.
+        * An existing file object (must be opened with mode 'w' or 'wb').
+        * A string to a file (will be truncated & opened automatically).
+        * subprocess.PIPE to capture & return the output.
+        * A boolean to indicate whether to capture the output.
+          True will capture the output via a tempfile (good for large output).
+        * An open file descriptor (as a positive integer).
+    stderr: Where to send stderr.  See |stdout| for possible values.  This also
+      may be subprocess.STDOUT to indicate stderr & stdout should be combined.
+    cwd: the working directory to run this cmd.
+    input: The data to pipe into this command through stdin.  If a file object
+      or file descriptor, stdin will be connected directly to that.
+    enter_chroot: this command should be run from within the chroot.  If set,
+      cwd must point to the scripts directory. If we are already inside the
+      chroot, this command will be run as if |enter_chroot| is False.
+    shell: Controls whether we add a shell as a command interpreter.  See cmd
+      since it has to agree as to the type.
+    env: If non-None, this is the environment for the new process.  If
+      enter_chroot is true then this is the environment of the enter_chroot,
+      most of which gets removed from the cmd run.
+    extra_env: If set, this is added to the environment for the new process.
+      In enter_chroot=True case, these are specified on the post-entry
+      side, and so are often more useful.  This dictionary is not used to
+      clear any entries though.
+    ignore_sigint: If True, we'll ignore signal.SIGINT before calling the
+      child.  This is the desired behavior if we know our child will handle
+      Ctrl-C.  If we don't do this, I think we and the child will both get
+      Ctrl-C at the same time, which means we'll forcefully kill the child.
+    chroot_args: An array of arguments for the chroot environment wrapper.
+    debug_level: The debug level of run's output.
+    check: Whether to raise an exception when command returns a non-zero exit
+      code, or return the CommandResult object containing the exit code.
+      Note: will still raise an exception if the cmd file does not exist.
+    int_timeout: If we're interrupted, how long (in seconds) should we give the
+      invoked process to clean up before we send a SIGTERM.
+    kill_timeout: If we're interrupted, how long (in seconds) should we give the
+      invoked process to shutdown from a SIGTERM before we SIGKILL it.
+    log_output: Log the command and its output automatically.
+    capture_output: Set |stdout| and |stderr| to True.
+    quiet: Set |print_cmd| to False, and |capture_output| to True.
+    encoding: Encoding for stdin/stdout/stderr, otherwise bytes are used.  Most
+      users want 'utf-8' here for string data.
+    errors: How to handle errors when |encoding| is used.  Defaults to 'strict',
+      but 'ignore' and 'replace' are common settings.
+    dryrun: Only log the command, and return a stub result.
+
+  Returns:
+    A CommandResult object.
+
+  Raises:
+    RunCommandError: Raised on error.
+  """
+  # Hide this function in pytest tracebacks when a RunCommandError is raised,
+  # as seeing the contents of this function when a command fails is not helpful.
+  # https://docs.pytest.org/en/latest/example/simple.html#writing-well-integrated-assertion-helpers
+  __tracebackhide__ = operator.methodcaller('errisinstance', RunCommandError)
+
+  # Handle backwards compatible settings.
+  if 'log_stdout_to_file' in kwargs:
+    logging.warning('run: log_stdout_to_file=X is now stdout=X')
+    log_stdout_to_file = kwargs.pop('log_stdout_to_file')
+    if log_stdout_to_file is not None:
+      stdout = log_stdout_to_file
+  stdout_file_mode = 'w+b'
+  if 'append_to_file' in kwargs:
+    # TODO(vapier): Enable this warning once chromite & users migrate.
+    # logging.warning('run: append_to_file is now part of stdout')
+    if kwargs.pop('append_to_file'):
+      stdout_file_mode = 'a+b'
+  assert not kwargs, 'Unknown arguments to run: %s' % (list(kwargs),)
+
+  if quiet:
+    print_cmd = False
+    capture_output = True
+
+  if capture_output:
+    # TODO(vapier): Enable this once we migrate all the legacy arguments above.
+    # if stdout is not None or stderr is not None:
+    #   raise ValueError('capture_output may not be used with stdout & stderr')
+    # TODO(vapier): Drop this specialization once we're Python 3-only as we can
+    # pass this argument down to Popen directly.
+    if stdout is None:
+      stdout = True
+    if stderr is None:
+      stderr = True
+
+  if encoding is not None and errors is None:
+    errors = 'strict'
+
+  # Set default for variables.
+  popen_stdout = None
+  popen_stderr = None
+  stdin = None
+  cmd_result = CommandResult()
+
+  # Force the timeout to float; in the process, if it's not convertible,
+  # a self-explanatory exception will be thrown.
+  kill_timeout = float(kill_timeout)
+
+  def _get_tempfile():
+    try:
+      return UnbufferedTemporaryFile()
+    except EnvironmentError as e:
+      if e.errno != errno.ENOENT:
+        raise
+      # This can occur if we were pointed at a specific location for our
+      # TMP, but that location has since been deleted.  Suppress that issue
+      # in this particular case since our usage guarantees deletion,
+      # and since this is primarily triggered during hard cgroups shutdown.
+      return UnbufferedTemporaryFile(dir='/tmp')
+
+  # Modify defaults based on parameters.
+  # Note that tempfiles must be unbuffered else attempts to read
+  # what a separate process did to that file can result in a bad
+  # view of the file.
+  log_stdout_to_file = False
+  if isinstance(stdout, six.string_types):
+    popen_stdout = open(stdout, stdout_file_mode)
+    log_stdout_to_file = True
+  elif hasattr(stdout, 'fileno'):
+    popen_stdout = stdout
+    log_stdout_to_file = True
+  elif isinstance(stdout, bool):
+    # This check must come before isinstance(int) because bool subclasses int.
+    if stdout:
+      popen_stdout = _get_tempfile()
+  elif isinstance(stdout, int):
+    popen_stdout = stdout
+  elif log_output:
+    popen_stdout = _get_tempfile()
+
+  log_stderr_to_file = False
+  if hasattr(stderr, 'fileno'):
+    popen_stderr = stderr
+    log_stderr_to_file = True
+  elif isinstance(stderr, bool):
+    # This check must come before isinstance(int) because bool subclasses int.
+    if stderr:
+      popen_stderr = _get_tempfile()
+  elif isinstance(stderr, int):
+    popen_stderr = stderr
+  elif log_output:
+    popen_stderr = _get_tempfile()
+
+  # If subprocesses have direct access to stdout or stderr, they can bypass
+  # our buffers, so we need to flush to ensure that output is not interleaved.
+  if popen_stdout is None or popen_stderr is None:
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+  # If input is a string, we'll create a pipe and send it through that.
+  # Otherwise we assume it's a file object that can be read from directly.
+  if isinstance(input, (six.string_types, six.binary_type)):
+    stdin = subprocess.PIPE
+    # Allow people to always pass in bytes or strings regardless of encoding.
+    # Our Popen usage takes care of converting everything to bytes first.
+    #
+    # Linter can't see that we're using |input| as a var, not a builtin.
+    # pylint: disable=input-builtin
+    if encoding and isinstance(input, six.text_type):
+      input = input.encode(encoding, errors)
+    elif not encoding and isinstance(input, six.text_type):
+      input = input.encode('utf-8')
+  elif input is not None:
+    stdin = input
+    input = None
+
+  # Sanity check the command.  This helps when RunCommand is deep in the call
+  # chain, but the command itself was constructed along the way.
+  if isinstance(cmd, (six.string_types, six.binary_type)):
+    if not shell:
+      raise ValueError('Cannot run a string command without a shell')
+    cmd = ['/bin/bash', '-c', cmd]
+    shell = False
+  elif shell:
+    raise ValueError('Cannot run an array command with a shell')
+  elif not cmd:
+    raise ValueError('Missing command to run')
+  elif not isinstance(cmd, (list, tuple)):
+    raise TypeError('cmd must be list or tuple, not %s: %r' %
+                    (type(cmd), repr(cmd)))
+  elif not all(isinstance(x, (six.binary_type, six.string_types)) for x in cmd):
+    raise TypeError('All command elements must be bytes/strings: %r' % (cmd,))
+
+  # If we are using enter_chroot, we need the chroot entry wrapper to pass env
+  # through to the final command.
+  env = env.copy() if env is not None else os.environ.copy()
+  # Looking at localized error messages may be unexpectedly dangerous, so we
+  # set LC_MESSAGES=C to make sure the output of commands is safe to inspect.
+  env['LC_MESSAGES'] = 'C'
+  env.update(extra_env if extra_env else {})
+
+  if enter_chroot and not IsInsideChroot():
+    wrapper = ['cros_sdk']
+    if cwd:
+      # If the current working directory is set, try to find cros_sdk relative
+      # to cwd. Generally cwd will be the buildroot therefore we want to use
+      # {cwd}/chromite/bin/cros_sdk. For more info PTAL at crbug.com/432620
+      path = os.path.join(cwd, constants.CHROMITE_BIN_SUBDIR, 'cros_sdk')
+      if os.path.exists(path):
+        wrapper = [path]
+
+    if chroot_args:
+      wrapper += chroot_args
+
+    if extra_env:
+      wrapper.extend('%s=%s' % (k, v) for k, v in extra_env.items())
+
+    cmd = wrapper + ['--'] + cmd
+
+  for var in constants.ENV_PASSTHRU:
+    if var not in env and var in os.environ:
+      env[var] = os.environ[var]
+
+  # Print out the command before running.
+  if dryrun or print_cmd or log_output:
+    log = ''
+    if dryrun:
+      log += '(dryrun) '
+    log += 'run: %s' % (CmdToStr(cmd),)
+    if cwd:
+      log += ' in %s' % (cwd,)
+    logging.log(debug_level, '%s', log)
+
+  cmd_result.args = cmd
+
+  # We still want to run something in dryrun mode so we process all the options
+  # and return appropriate values (e.g. output with correct encoding).
+  popen_cmd = ['true'] if dryrun else cmd
+
+  proc = None
+  # Verify that the signals module is actually usable, and won't segfault
+  # upon invocation of getsignal.  See signals.SignalModuleUsable for the
+  # details and upstream python bug.
+  use_signals = signals.SignalModuleUsable()
+  try:
+    proc = _Popen(popen_cmd, cwd=cwd, stdin=stdin, stdout=popen_stdout,
+                  stderr=popen_stderr, shell=False, env=env,
+                  close_fds=True)
+
+    if use_signals:
+      if ignore_sigint:
+        old_sigint = signal.signal(signal.SIGINT, signal.SIG_IGN)
+      else:
+        old_sigint = signal.getsignal(signal.SIGINT)
+        signal.signal(signal.SIGINT,
+                      functools.partial(_KillChildProcess, proc, int_timeout,
+                                        kill_timeout, cmd, old_sigint))
+
+      old_sigterm = signal.getsignal(signal.SIGTERM)
+      signal.signal(signal.SIGTERM,
+                    functools.partial(_KillChildProcess, proc, int_timeout,
+                                      kill_timeout, cmd, old_sigterm))
+
+    try:
+      (cmd_result.stdout, cmd_result.stderr) = proc.communicate(input)
+    finally:
+      if use_signals:
+        signal.signal(signal.SIGINT, old_sigint)
+        signal.signal(signal.SIGTERM, old_sigterm)
+
+      if (popen_stdout and not isinstance(popen_stdout, int) and
+          not log_stdout_to_file):
+        popen_stdout.seek(0)
+        cmd_result.stdout = popen_stdout.read()
+        popen_stdout.close()
+      elif log_stdout_to_file:
+        popen_stdout.close()
+
+      if (popen_stderr and not isinstance(popen_stderr, int) and
+          not log_stderr_to_file):
+        popen_stderr.seek(0)
+        cmd_result.stderr = popen_stderr.read()
+        popen_stderr.close()
+
+    cmd_result.returncode = proc.returncode
+
+    # The try/finally block is a bit hairy.  We normally want the logged
+    # output to be what gets passed back up.  But if there's a decode error,
+    # we don't want it to break logging entirely.  If the output had a lot of
+    # newlines, always logging it as bytes wouldn't be human readable.
+    try:
+      if encoding:
+        if cmd_result.stdout is not None:
+          cmd_result.stdout = cmd_result.stdout.decode(encoding, errors)
+        if cmd_result.stderr is not None:
+          cmd_result.stderr = cmd_result.stderr.decode(encoding, errors)
+    finally:
+      if log_output:
+        if cmd_result.stdout:
+          logging.log(debug_level, '(stdout):\n%s', cmd_result.stdout)
+        if cmd_result.stderr:
+          logging.log(debug_level, '(stderr):\n%s', cmd_result.stderr)
+
+    if check and proc.returncode:
+      msg = 'cmd=%s' % cmd
+      if cwd:
+        msg += ', cwd=%s' % cwd
+      if extra_env:
+        msg += ', extra env=%s' % extra_env
+      raise RunCommandError(msg, cmd_result)
+  except OSError as e:
+    estr = str(e)
+    if e.errno == errno.EACCES:
+      estr += '; does the program need `chmod a+x`?'
+    raise RunCommandError(estr, CommandResult(args=cmd), exception=e)
+  finally:
+    if proc is not None:
+      # Ensure the process is dead.
+      _KillChildProcess(proc, int_timeout, kill_timeout, cmd, None, None, None)
+
+  # We might capture stdout/stderr for internal reasons (like logging), but we
+  # don't want to let it leak back out to the callers.  They only get output if
+  # they explicitly requested it.
+  if stdout is None:
+    cmd_result.stdout = None
+  if stderr is None:
+    cmd_result.stderr = None
+
+  return cmd_result
+# pylint: enable=redefined-builtin
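+
+# Illustrative usage of run(), with a hypothetical command and the semantics
+# documented in the docstring above:
+#   result = run(['echo', 'hi'], capture_output=True, encoding='utf-8')
+#   result.stdout      -> 'hi\n'
+#   result.returncode  -> 0
+#   run(['false'])     -> raises RunCommandError (check=True by default)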
+
+
+# Convenience run methods.
+#
+# We don't use functools.partial because it binds the methods at import time,
+# which doesn't work well with unit tests, since it bypasses the mock that may
+# be set up for run.
+
+def dbg_run(*args, **kwargs):
+  kwargs.setdefault('debug_level', logging.DEBUG)
+  return run(*args, **kwargs)
+
+
+class DieSystemExit(SystemExit):
+  """Custom Exception used so we can intercept this if necessary."""
+
+
+def Die(message, *args, **kwargs):
+  """Emits an error message with a stack trace and halts execution.
+
+  Args:
+    message: The message to be emitted before exiting.
+  """
+  logging.error(message, *args, **kwargs)
+  raise DieSystemExit(1)
+
+
+def GetSysrootToolPath(sysroot, tool_name):
+  """Returns the path to the sysroot specific version of a tool.
+
+  Does not check that the tool actually exists.
+
+  Args:
+    sysroot: build root of the system in question.
+    tool_name: string name of tool desired (e.g. 'equery').
+
+  Returns:
+    string path to tool inside the sysroot.
+  """
+  if sysroot == '/':
+    return os.path.join(sysroot, 'usr', 'bin', tool_name)
+
+  return os.path.join(sysroot, 'build', 'bin', tool_name)
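+
+# Illustrative results, assumed from the path logic above (the board sysroot
+# path is hypothetical):
+#   GetSysrootToolPath('/', 'equery')          -> /usr/bin/equery
+#   GetSysrootToolPath('/build/eve', 'equery') -> /build/eve/build/bin/equery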
+
+
+def IsInsideChroot():
+  """Returns True if we are inside chroot."""
+  return os.path.exists('/etc/cros_chroot_version')
+
+
+def IsOutsideChroot():
+  """Returns True if we are outside chroot."""
+  return not IsInsideChroot()
+
+
+def AssertInsideChroot():
+  """Die if we are outside the chroot"""
+  if not IsInsideChroot():
+    Die('%s: please run inside the chroot', os.path.basename(sys.argv[0]))
+
+
+def AssertOutsideChroot():
+  """Die if we are inside the chroot"""
+  if IsInsideChroot():
+    Die('%s: please run outside the chroot', os.path.basename(sys.argv[0]))
+
+
+def GetHostName(fully_qualified=False):
+  """Return hostname of current machine, with domain if |fully_qualified|."""
+  hostname = socket.gethostname()
+  try:
+    hostname = socket.gethostbyaddr(hostname)[0]
+  except (socket.gaierror, socket.herror) as e:
+    logging.warning('please check your /etc/hosts file; resolving your hostname'
+                    ' (%s) failed: %s', hostname, e)
+
+  if fully_qualified:
+    return hostname
+  else:
+    return hostname.partition('.')[0]
+
+
+def GetHostDomain():
+  """Return domain of current machine.
+
+  If there is no domain, return 'localdomain'.
+  """
+
+  hostname = GetHostName(fully_qualified=True)
+  domain = hostname.partition('.')[2]
+  return domain if domain else 'localdomain'
+
+
+def HostIsCIBuilder(fq_hostname=None, golo_only=False, gce_only=False):
+  """Return True iff a host is a continuous-integration builder.
+
+  Args:
+    fq_hostname: The fully qualified hostname. By default, we fetch it for you.
+    golo_only: Only return True if the host is in the Chrome Golo. Defaults to
+      False.
+    gce_only: Only return True if the host is in the Chrome GCE block. Defaults
+      to False.
+  """
+  if not fq_hostname:
+    fq_hostname = GetHostName(fully_qualified=True)
+  in_golo = fq_hostname.endswith('.' + constants.GOLO_DOMAIN)
+  in_gce = (fq_hostname.endswith('.' + constants.CHROME_DOMAIN) or
+            fq_hostname.endswith('.' + constants.CHROMEOS_BOT_INTERNAL))
+  if golo_only:
+    return in_golo
+  elif gce_only:
+    return in_gce
+  else:
+    return in_golo or in_gce
+
+
+COMP_NONE = 0
+COMP_GZIP = 1
+COMP_BZIP2 = 2
+COMP_XZ = 3
+
+
+def FindCompressor(compression, chroot=None):
+  """Locate a compressor utility program (possibly in a chroot).
+
+  Since we compress/decompress a lot, make it easy to locate a
+  suitable utility program in a variety of locations.  We favor
+  the one in the chroot over /, and the parallel implementation
+  over the single threaded one.
+
+  Args:
+    compression: The type of compression desired.
+    chroot: Optional path to a chroot to search.
+
+  Returns:
+    Path to a compressor.
+
+  Raises:
+    ValueError: If compression is unknown.
+  """
+  if compression == COMP_XZ:
+    return os.path.join(constants.CHROMITE_SCRIPTS_DIR, 'xz_auto')
+  elif compression == COMP_GZIP:
+    std = 'gzip'
+    para = 'pigz'
+  elif compression == COMP_BZIP2:
+    std = 'bzip2'
+    para = 'pbzip2'
+  elif compression == COMP_NONE:
+    return 'cat'
+  else:
+    raise ValueError('unknown compression')
+
+  roots = []
+  if chroot:
+    roots.append(chroot)
+  roots.append('/')
+
+  for prog in [para, std]:
+    for root in roots:
+      for subdir in ['', 'usr']:
+        path = os.path.join(root, subdir, 'bin', prog)
+        if os.path.exists(path):
+          return path
+
+  return std
+
+
+def CompressionStrToType(s):
+  """Convert a compression string type to a constant.
+
+  Args:
+    s: string to check
+
+  Returns:
+    A constant, or None if the compression type is unknown.
+  """
+  _COMP_STR = {
+      'gz': COMP_GZIP,
+      'bz2': COMP_BZIP2,
+      'xz': COMP_XZ,
+  }
+  if s:
+    return _COMP_STR.get(s)
+  else:
+    return COMP_NONE
+
+
+def CompressionExtToType(file_name):
+  """Retrieve a compression type constant from a compression file's name.
+
+  Args:
+    file_name: Name of a compression file.
+
+  Returns:
+    A constant; COMP_NONE is returned if the extension is unknown.
+  """
+  ext = os.path.splitext(file_name)[-1]
+  _COMP_EXT = {
+      '.tgz': COMP_GZIP,
+      '.gz': COMP_GZIP,
+      '.tbz2': COMP_BZIP2,
+      '.bz2': COMP_BZIP2,
+      '.txz': COMP_XZ,
+      '.xz': COMP_XZ,
+  }
+  return _COMP_EXT.get(ext, COMP_NONE)
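+
+
+# A minimal usage sketch of the helpers above; the file name and the helper
+# function itself are hypothetical and exist only for illustration.
+def _example_pick_compressor():
+  """Sketch: map a hypothetical artifact name to a compressor program."""
+  comp_type = CompressionExtToType('payload.tar.xz')
+  assert comp_type == COMP_XZ
+  assert CompressionStrToType('bz2') == COMP_BZIP2
+  # For gzip/bzip2, FindCompressor favors the parallel tool (pigz/pbzip2)
+  # when installed; for xz it returns chromite's xz_auto wrapper.
+  return FindCompressor(comp_type)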
+
+
+def CompressFile(infile, outfile):
+  """Compress a file using compressor specified by |outfile| suffix.
+
+  Args:
+    infile: File to compress.
+    outfile: Name of output file. Compression used is based on the
+             type of suffix of the name specified (e.g.: .bz2).
+  """
+  comp_type = CompressionExtToType(outfile)
+  assert comp_type and comp_type != COMP_NONE
+  comp = FindCompressor(comp_type)
+  if os.path.basename(comp) == 'pixz':
+    # pixz does not accept '-c'; instead an explicit '-i' indicates input file
+    # should not be deleted, and '-o' specifies output file.
+    cmd = [comp, '-i', infile, '-o', outfile]
+    run(cmd)
+  else:
+    cmd = [comp, '-c', infile]
+    run(cmd, stdout=outfile)
+
+
+def UncompressFile(infile, outfile):
+  """Uncompress a file using compressor specified by |infile| suffix.
+
+  Args:
+    infile: File to uncompress. Compression used is based on the
+            type of suffix of the name specified (e.g.: .bz2).
+    outfile: Name of output file.
+  """
+  comp_type = CompressionExtToType(infile)
+  assert comp_type and comp_type != COMP_NONE
+  comp = FindCompressor(comp_type)
+  if os.path.basename(comp) == 'pixz':
+    # pixz does not accept '-c'; instead an explicit '-i' indicates input file
+    # should not be deleted, and '-o' specifies output file.
+    cmd = [comp, '-d', '-i', infile, '-o', outfile]
+    run(cmd)
+  else:
+    cmd = [comp, '-dc', infile]
+    run(cmd, stdout=outfile)
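+
+
+# A minimal round-trip sketch of the two helpers above, assuming hypothetical
+# file paths; the function is illustrative only and is not called anywhere.
+def _example_compress_roundtrip():
+  """Sketch: compress a file based on its suffix, then restore it."""
+  CompressFile('/tmp/results.tar', '/tmp/results.tar.bz2')
+  UncompressFile('/tmp/results.tar.bz2', '/tmp/results.tar')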
+
+
+class CreateTarballError(RunCommandError):
+  """Error while running tar.
+
+  We may run tar multiple times because of "soft" errors.  The result is from
+  the last run instance.
+  """
+
+
+def CreateTarball(target, cwd, sudo=False, compression=COMP_XZ, chroot=None,
+                  inputs=None, timeout=300, extra_args=None, **kwargs):
+  """Create a tarball.  Executes 'tar' on the commandline.
+
+  Args:
+    target: The path of the tar file to generate.
+    cwd: The directory to run the tar command.
+    sudo: Whether to run with "sudo".
+    compression: The type of compression desired.  See the FindCompressor
+      function for details.
+    chroot: See FindCompressor().
+    inputs: A list of files or directories to add to the tarball.  If unset,
+      defaults to ".".
+    timeout: The number of seconds to wait on soft failure.
+    extra_args: A list of extra args to pass to "tar".
+    kwargs: Any run options/overrides to use.
+
+  Returns:
+    The cmd_result object returned by the run invocation.
+
+  Raises:
+    CreateTarballError: if the tar command failed, possibly after retry.
+  """
+  if inputs is None:
+    inputs = ['.']
+
+  if extra_args is None:
+    extra_args = []
+  kwargs.setdefault('debug_level', logging.INFO)
+
+  comp = FindCompressor(compression, chroot=chroot)
+  cmd = (['tar'] +
+         extra_args +
+         ['--sparse', '-I', comp, '-cf', target])
+  if len(inputs) > _THRESHOLD_TO_USE_T_FOR_TAR:
+    cmd += ['--null', '-T', '/dev/stdin']
+    rc_input = b'\0'.join(x.encode('utf-8') for x in inputs)
+  else:
+    cmd += list(inputs)
+    rc_input = None
+
+  rc_func = sudo_run if sudo else run
+
+  # If tar fails with status 1, retry twice. Once after timeout seconds and
+  # again 2*timeout seconds after that.
+  for try_count in range(3):
+    try:
+      result = rc_func(cmd, cwd=cwd, **dict(kwargs, check=False,
+                                            input=rc_input))
+    except RunCommandError as rce:
+      # There are cases where run never executes the command (cannot find tar,
+      # cannot execute tar, such as when cwd does not exist). Although the run
+      # command will show low-level problems, we also want to log the context
+      # of what CreateTarball was trying to do.
+      logging.error('CreateTarball unable to run tar for %s in %s. cmd={%s}',
+                    target, cwd, cmd)
+      raise rce
+    if result.returncode == 0:
+      return result
+    if result.returncode != 1 or try_count > 1:
+      # Since the build is abandoned at this point, we will take 5
+      # entire minutes to track down the competing process.
+      # Error will have the low-level tar command error, so log the context
+      # of the tar command (target file, current working dir).
+      logging.error('CreateTarball failed creating %s in %s. cmd={%s}',
+                    target, cwd, cmd)
+      raise CreateTarballError('CreateTarball', result)
+
+    assert result.returncode == 1
+    time.sleep(timeout * (try_count + 1))
+    logging.warning('CreateTarball: tar: source modification time changed '
+                    '(see crbug.com/547055), retrying')
+    logging.PrintBuildbotStepWarnings()
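+
+
+# A minimal usage sketch for CreateTarball, assuming a hypothetical results
+# directory; the retry-on-soft-failure logic above is handled internally.
+def _example_create_tarball():
+  """Sketch: tar up a directory with xz compression."""
+  return CreateTarball('/tmp/results.tar.xz', '/tmp/results',
+                       compression=COMP_XZ, inputs=['.'])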
+
+
+def GetInput(prompt):
+  """Helper function to grab input from a user.   Makes testing easier."""
+  # We have people use GetInput() so they don't have to use these bad builtins
+  # themselves or deal with version skews.
+  # pylint: disable=bad-builtin,input-builtin,raw_input-builtin,undefined-variable
+  if sys.version_info.major < 3:
+    return raw_input(prompt)
+  else:
+    return input(prompt)
+
+
+def GetChoice(title, options, group_size=0):
+  """Ask user to choose an option from the list.
+
+  When |group_size| is 0, then all items in |options| will be extracted and
+  shown at the same time.  Otherwise, the items will be extracted |group_size|
+  at a time, and then shown to the user.  This makes it easier to support
+  generators that are slow or extremely large, or cases where people usually
+  want to pick from the first few choices.
+
+  Args:
+    title: The text to display before listing options.
+    options: Iterable which provides options to display.
+    group_size: How many options to show before asking the user to choose.
+
+  Returns:
+    An integer of the index in |options| the user picked.
+  """
+  def PromptForChoice(max_choice, more):
+    prompt = 'Please choose an option [0-%d]' % max_choice
+    if more:
+      prompt += ' (Enter for more options)'
+    prompt += ': '
+
+    while True:
+      choice = GetInput(prompt)
+      if more and not choice.strip():
+        return None
+      try:
+        choice = int(choice)
+      except ValueError:
+        print('Input is not an integer')
+        continue
+      if choice < 0 or choice > max_choice:
+        print('Choice %d out of range (0-%d)' % (choice, max_choice))
+        continue
+      return choice
+
+  print(title)
+  max_choice = 0
+  for i, opt in enumerate(options):
+    if i and group_size and not i % group_size:
+      choice = PromptForChoice(i - 1, True)
+      if choice is not None:
+        return choice
+    print('  [%d]: %s' % (i, opt))
+    max_choice = i
+
+  return PromptForChoice(max_choice, False)
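+
+
+# A minimal usage sketch for GetChoice with a hypothetical list of boards.
+def _example_get_choice():
+  """Sketch: ask the user to pick a board name interactively."""
+  boards = ['amd64-generic', 'arm-generic', 'betty']
+  index = GetChoice('Pick a board:', boards)
+  return boards[index]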
+
+
+def BooleanPrompt(prompt='Do you want to continue?', default=True,
+                  true_value='yes', false_value='no', prolog=None):
+  """Helper function for processing boolean choice prompts.
+
+  Args:
+    prompt: The question to present to the user.
+    default: Boolean to return if the user just presses enter.
+    true_value: The text to display that represents a True returned.
+    false_value: The text to display that represents a False returned.
+    prolog: The text to display before prompt.
+
+  Returns:
+    True or False.
+  """
+  true_value, false_value = true_value.lower(), false_value.lower()
+  true_text, false_text = true_value, false_value
+  if true_value == false_value:
+    raise ValueError('true_value and false_value must differ: got %r'
+                     % true_value)
+
+  if default:
+    true_text = true_text[0].upper() + true_text[1:]
+  else:
+    false_text = false_text[0].upper() + false_text[1:]
+
+  prompt = ('\n%s (%s/%s)? ' % (prompt, true_text, false_text))
+
+  if prolog:
+    prompt = ('\n%s\n%s' % (prolog, prompt))
+
+  while True:
+    try:
+      response = GetInput(prompt).lower()
+    except EOFError:
+      # If the user hits CTRL+D, or stdin is disabled, use the default.
+      print()
+      response = None
+    except KeyboardInterrupt:
+      # If the user hits CTRL+C, just exit the process.
+      print()
+      Die('CTRL+C detected; exiting')
+
+    if not response:
+      return default
+    if true_value.startswith(response):
+      if not false_value.startswith(response):
+        return True
+      # common prefix between the two...
+    elif false_value.startswith(response):
+      return False
+
+
+def BooleanShellValue(sval, default, msg=None):
+  """See if the string value is a value users typically consider as boolean
+
+  Oftentimes people set shell variables to different values to mean "true"
+  or "false".  For example, they can do:
+    export FOO=yes
+    export BLAH=1
+    export MOO=true
+  Handle all that user ugliness here.
+
+  If the user picks an invalid value, you can use |msg| to display a non-fatal
+  warning rather than raising an exception.
+
+  Args:
+    sval: The string value we got from the user.
+    default: If we can't figure out if the value is true or false, use this.
+    msg: If |sval| is an unknown value, use |msg| to warn the user that we
+         could not decode the input.  Otherwise, raise ValueError().
+
+  Returns:
+    The interpreted boolean value of |sval|.
+
+  Raises:
+    ValueError() if |sval| is an unknown value and |msg| is not set.
+  """
+  if sval is None:
+    return default
+
+  if isinstance(sval, six.string_types):
+    s = sval.lower()
+    if s in ('yes', 'y', '1', 'true'):
+      return True
+    elif s in ('no', 'n', '0', 'false'):
+      return False
+
+  if msg is not None:
+    logging.warning('%s: %r', msg, sval)
+    return default
+  else:
+    raise ValueError('Could not decode as a boolean value: %r' % sval)
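+
+
+# A minimal sketch of how BooleanShellValue interprets hypothetical shell
+# settings; the asserted behavior follows directly from the mapping above.
+def _example_boolean_shell_value():
+  """Sketch: interpret user-provided shell-style boolean strings."""
+  assert BooleanShellValue('yes', False) is True
+  assert BooleanShellValue('0', True) is False
+  # Unknown values fall back to the default when |msg| is given.
+  assert BooleanShellValue('maybe', True, msg='Unknown value') is True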
+
+
+# Suppress whacked complaints about abstract class being unused.
+class MasterPidContextManager(object):
+  """Allow context managers to restrict their exit to within the same PID."""
+
+  # In certain cases we actually want this run outside
+  # of the main pid - specifically in backup processes
+  # doing cleanup.
+  ALTERNATE_MASTER_PID = None
+
+  def __init__(self):
+    self._invoking_pid = None
+
+  def __enter__(self):
+    self._invoking_pid = os.getpid()
+    return self._enter()
+
+  def __exit__(self, exc_type, exc, exc_tb):
+    curpid = os.getpid()
+    if curpid == self.ALTERNATE_MASTER_PID:
+      self._invoking_pid = curpid
+    if curpid == self._invoking_pid:
+      return self._exit(exc_type, exc, exc_tb)
+
+  def _enter(self):
+    raise NotImplementedError(self, '_enter')
+
+  def _exit(self, exc_type, exc, exc_tb):
+    raise NotImplementedError(self, '_exit')
+
+
+class ContextManagerStack(object):
+  """Context manager that is designed to safely allow nesting and stacking.
+
+  Python2.7 directly supports a with syntax that generally removes the need for
+  this, although this form avoids indentation hell when there are a lot of
+  context managers.  It also permits more programmatic control, allowing
+  conditional usage.
+
+  For Python2.6, see http://docs.python.org/library/contextlib.html; the short
+  version is that there is a race in the available stdlib/language rules under
+  2.6 when dealing w/ multiple context managers, thus this safe version was
+  added.
+
+  For each context manager added to this instance, it will unwind them,
+  invoking them as if it had been constructed as a set of manually nested
+  with statements.
+  """
+
+  def __init__(self):
+    self._stack = []
+
+  def Add(self, functor, *args, **kwargs):
+    """Add a context manager onto the stack.
+
+    Usage of this is essentially the following:
+    >>> stack.Add(Timeout, 60)
+
+    It must be done in this fashion, else there is a mild race that exists
+    between context manager instantiation and initial __enter__.
+
+    Invoking it in the form specified eliminates that race.
+
+    Args:
+      functor: A callable to instantiate a context manager.
+      args and kwargs: positional and keyword args to pass to functor.
+
+    Returns:
+      The newly created (and __enter__'d) context manager.
+      Note: This is not the same value as the "with" statement -- that returns
+      the value from the __enter__ function while this is the manager itself.
+    """
+    obj = None
+    try:
+      obj = functor(*args, **kwargs)
+      return obj
+    finally:
+      if obj is not None:
+        obj.__enter__()
+        self._stack.append(obj)
+
+  def __enter__(self):
+    # Nothing to do in this case.  The individual __enter__'s are done
+    # when the context managers are added, which will likely be after
+    # the __enter__ method of this stack is called.
+    return self
+
+  def __exit__(self, exc_type, exc, exc_tb):
+    # Exit each context manager in stack in reverse order, tracking the results
+    # to know whether or not to suppress the exception raised (or to switch that
+    # exception to a new one triggered by an individual handler's __exit__).
+    for handler in reversed(self._stack):
+      # pylint: disable=bare-except
+      try:
+        if handler.__exit__(exc_type, exc, exc_tb):
+          exc_type = exc = exc_tb = None
+      except:
+        exc_type, exc, exc_tb = sys.exc_info()
+
+    self._stack = []
+
+    # Return True if any exception was handled.
+    if all(x is None for x in (exc_type, exc, exc_tb)):
+      return True
+
+    # Raise any exception that is left over from exiting all context managers.
+    # Normally a single context manager would return False to allow caller to
+    # re-raise the exception itself, but here the exception might have been
+    # raised during the exiting of one of the individual context managers.
+    six.reraise(exc_type, exc, exc_tb)
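+
+
+# A minimal usage sketch for ContextManagerStack; |time_inner| is a
+# hypothetical flag showing the conditional usage described above.
+def _example_context_manager_stack(time_inner=False):
+  """Sketch: build up context managers programmatically via Add()."""
+  with ContextManagerStack() as stack:
+    # Add() instantiates and enters each manager right away; every manager
+    # added is exited in reverse order when the stack itself exits.
+    stack.Add(TimedSection)
+    if time_inner:
+      stack.Add(TimedSection)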
+
+
+def iflatten_instance(iterable,
+                      terminate_on_kls=(six.string_types, six.binary_type)):
+  """Derivative of snakeoil.lists.iflatten_instance; flatten an object.
+
+  Given an object, flatten it into a single-depth iterable,
+  stopping descent on objects that either aren't iterable or match
+  isinstance(obj, terminate_on_kls).
+
+  Examples:
+    >>> print(list(iflatten_instance([1, 2, "as", ["4", 5]])))
+    [1, 2, "as", "4", 5]
+  """
+  def descend_into(item):
+    if isinstance(item, terminate_on_kls):
+      return False
+    try:
+      iter(item)
+    except TypeError:
+      return False
+    # Note strings can be infinitely descended through- thus this
+    # recursion limiter.
+    return not isinstance(item, six.string_types) or len(item) > 1
+
+  if not descend_into(iterable):
+    yield iterable
+    return
+  for item in iterable:
+    if not descend_into(item):
+      yield item
+    else:
+      for subitem in iflatten_instance(item, terminate_on_kls):
+        yield subitem
+
+
[email protected]
+def Open(obj, mode='r'):
+  """Convenience ctx that accepts a file path or an already open file object."""
+  if isinstance(obj, six.string_types):
+    with open(obj, mode=mode) as f:
+      yield f
+  else:
+    yield obj
+
+
+def SafeRun(functors, combine_exceptions=False):
+  """Executes a list of functors, continuing on exceptions.
+
+  Args:
+    functors: An iterable of functors to call.
+    combine_exceptions: If set, and multiple exceptions are encountered,
+      SafeRun will raise a RuntimeError containing a list of all the exceptions.
+      If only one exception is encountered, then the default behavior of
+      re-raising the original exception with unmodified stack trace will be
+      kept.
+
+  Raises:
+    The first exception encountered, with corresponding backtrace, unless
+    |combine_exceptions| is specified and there is more than one exception
+    encountered, in which case a RuntimeError containing a list of all the
+    exceptions that were encountered is raised.
+  """
+  errors = []
+
+  for f in functors:
+    try:
+      f()
+    except Exception as e:
+      # Append the exception object and the traceback.
+      errors.append((e, sys.exc_info()[2]))
+
+  if errors:
+    if len(errors) == 1 or not combine_exceptions:
+      # To preserve the traceback.
+      inst, tb = errors[0]
+      six.reraise(type(inst), inst, tb)
+    else:
+      raise RuntimeError([e[0] for e in errors])
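+
+
+# A minimal usage sketch for SafeRun with hypothetical cleanup callbacks.
+def _example_safe_run():
+  """Sketch: run every cleanup step, deferring exceptions until the end."""
+  SafeRun([lambda: os.remove('/tmp/a.lock'),
+           lambda: os.remove('/tmp/b.lock')],
+          combine_exceptions=True)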
+
+
+def UserDateTimeFormat(timeval=None):
+  """Format a date meant to be viewed by a user
+
+  The focus here is to have a format that is easily readable by humans,
+  but still easy (and unambiguous) for a machine to parse.  Hence, we
+  use the RFC 2822 date format (with timezone name appended).
+
+  Args:
+    timeval: Either a datetime object or a floating point time value as accepted
+             by gmtime()/localtime().  If None, the current time is used.
+
+  Returns:
+    A string format such as 'Wed, 20 Feb 2013 15:25:15 -0500 (EST)'
+  """
+  if isinstance(timeval, datetime):
+    timeval = time.mktime(timeval.timetuple())
+  return '%s (%s)' % (email.utils.formatdate(timeval=timeval, localtime=True),
+                      time.strftime('%Z', time.localtime(timeval)))
+
+
+def ParseUserDateTimeFormat(time_string):
+  """Parse a time string into a floating point time value.
+
+  This function is essentially the inverse of UserDateTimeFormat.
+
+  Args:
+    time_string: A string datetime representation in RFC 2822 format, such as
+                 'Wed, 20 Feb 2013 15:25:15 -0500 (EST)'.
+
+  Returns:
+    Floating point Unix timestamp (seconds since epoch).
+  """
+  return email.utils.mktime_tz(email.utils.parsedate_tz(time_string))
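+
+
+# A minimal round-trip sketch for the two helpers above.
+def _example_user_datetime_roundtrip():
+  """Sketch: format the current time for humans, then parse it back."""
+  stamp = UserDateTimeFormat()
+  return ParseUserDateTimeFormat(stamp)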
+
+
+def GetDefaultBoard():
+  """Gets the default board.
+
+  Returns:
+    The default board (as a string), or None if either the default board
+    file was missing or malformed.
+  """
+  default_board_file_name = os.path.join(constants.SOURCE_ROOT, 'src',
+                                         'scripts', '.default_board')
+  try:
+    with open(default_board_file_name) as default_board_file:
+      default_board = default_board_file.read().strip()
+      # Check for user typos like whitespace
+      if not re.match('[a-zA-Z0-9-_]*$', default_board):
+        logging.warning('Noticed invalid default board: |%s|. Ignoring this '
+                        'default.', default_board)
+        default_board = None
+  except IOError:
+    return None
+
+  return default_board
+
+
+def SetDefaultBoard(board):
+  """Set the default board.
+
+  Args:
+    board (str): The name of the board to save as the default.
+
+  Returns:
+    bool - True if successfully wrote default, False otherwise.
+  """
+  config_path = os.path.join(constants.CROSUTILS_DIR, '.default_board')
+  try:
+    with open(config_path, 'w') as f:
+      f.write(board)
+  except IOError as e:
+    logging.error('Unable to write default board: %s', e)
+    return False
+
+  return True
+
+
+def GetBoard(device_board, override_board=None, force=False, strict=False):
+  """Gets the board name to use.
+
+  Ask user to confirm when |override_board| and |device_board| are
+  both None.
+
+  Args:
+    device_board: The board detected on the device.
+    override_board: Overrides the board.
+    force: Force using the default board if |device_board| is None.
+    strict: If True, abort if no valid board can be found.
+
+  Returns:
+    Returns the first non-None board in the following order:
+    |override_board|, |device_board|, and GetDefaultBoard().
+
+  Raises:
+    DieSystemExit: If board is not set or user enters no.
+  """
+  if override_board:
+    return override_board
+
+  board = device_board or GetDefaultBoard()
+  if not device_board:
+    if not board and strict:
+      Die('No board specified and no default board found.')
+    msg = 'Cannot detect board name; using default board %s.' % board
+    if not force and not BooleanPrompt(default=False, prolog=msg):
+      Die('Exiting...')
+
+    logging.warning(msg)
+
+  return board
+
+
+# Structure to hold the values produced by TimedSection.
+#
+#  Attributes:
+#    start: The absolute start time as a datetime.
+#    finish: The absolute finish time as a datetime, or None if in progress.
+#    delta: The runtime as a timedelta, or None if in progress.
+TimedResults = cros_collections.Collection(
+    'TimedResults', start=None, finish=None, delta=None)
+
+
[email protected]
+def TimedSection():
+  """Context manager to time how long a code block takes.
+
+  Examples:
+    with cros_build_lib.TimedSection() as timer:
+      DoWork()
+    logging.info('DoWork took %s', timer.delta)
+
+  Context manager value will be a TimedResults instance.
+  """
+  # Create our context manager value.
+  times = TimedResults(start=datetime.now())
+  try:
+    yield times
+  finally:
+    times.finish = datetime.now()
+    times.delta = times.finish - times.start
+
+
+def GetRandomString():
+  """Returns a random string.
+
+  It will be 32 characters long, although callers shouldn't rely on this.
+  Only lowercase & numbers are used to avoid case-insensitive collisions.
+  """
+  # Start with current time.  This "scopes" the following random data.
+  stamp = b'%x' % int(time.time())
+  # Add in some entropy.  This reads more bytes than strictly necessary, but
+  # it guarantees that we always have enough bytes below.
+  data = os.urandom(16)
+  # Then convert it to a lowercase base32 string of 32 characters.
+  return base64.b32encode(stamp + data).decode('utf-8')[0:32].lower()
+
+
+def MachineDetails():
+  """Returns a string to help identify the source of a job.
+
+  This is not meant for machines to parse; instead, we want content that is easy
+  for humans to read when trying to figure out where "something" is coming from.
+  For example, when a service has grabbed a lock in Google Storage, and we want
+  to see what process actually triggered that (in case it is a test gone rogue),
+  the content in here should help triage.
+
+  Note: none of the details included may be secret so they can be freely pasted
+  into bug reports/chats/logs/etc...
+
+  Note: this content should not be large
+
+  Returns:
+    A string with content that helps identify this system/process/etc...
+  """
+  return '\n'.join((
+      'PROG=%s' % inspect.stack()[-1][1],
+      'USER=%s' % getpass.getuser(),
+      'HOSTNAME=%s' % GetHostName(fully_qualified=True),
+      'PID=%s' % os.getpid(),
+      'TIMESTAMP=%s' % UserDateTimeFormat(),
+      'RANDOM_JUNK=%s' % GetRandomString(),
+  )) + '\n'
+
+
+def UnbufferedTemporaryFile(**kwargs):
+  """Handle buffering changes in tempfile.TemporaryFile."""
+  assert 'bufsize' not in kwargs
+  assert 'buffering' not in kwargs
+  if sys.version_info.major < 3:
+    kwargs['bufsize'] = 0
+  else:
+    kwargs['buffering'] = 0
+  return tempfile.TemporaryFile(**kwargs)
+
+
+def UnbufferedNamedTemporaryFile(**kwargs):
+  """Handle buffering changes in tempfile.NamedTemporaryFile."""
+  assert 'bufsize' not in kwargs
+  assert 'buffering' not in kwargs
+  if sys.version_info.major < 3:
+    kwargs['bufsize'] = 0
+  else:
+    kwargs['buffering'] = 0
+  return tempfile.NamedTemporaryFile(**kwargs)
diff --git a/utils/frozen_chromite/lib/cros_collections.py b/utils/frozen_chromite/lib/cros_collections.py
new file mode 100644
index 0000000..d63c6db
--- /dev/null
+++ b/utils/frozen_chromite/lib/cros_collections.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Chromite extensions on top of the collections module."""
+
+from __future__ import print_function
+
+
+def _CollectionExec(expr, classname):
+  """Hack to workaround <=Python-2.7.8 exec bug.
+
+  See https://bugs.python.org/issue21591 for details.
+
+  TODO(crbug.com/998624): Drop this in Jan 2020.
+  """
+  namespace = {}
+  exec(expr, {}, namespace)  # pylint: disable=exec-used
+  return namespace[classname]
+
+
+# We have nested kwargs below, so disable the |kwargs| naming here.
+# pylint: disable=docstring-misnamed-args
+def Collection(classname, **default_kwargs):
+  """Create a new class with mutable named members.
+
+  This is like collections.namedtuple, but mutable.  Also similar to the
+  python 3.3 types.SimpleNamespace.
+
+  Examples:
+    # Declare default values for this new class.
+    Foo = cros_build_lib.Collection('Foo', a=0, b=10)
+    # Create a new instance, but set b to 4.
+    foo = Foo(b=4)
+    # Print out a (will be the default 0) and b (will be 4).
+    print('a = %i, b = %i' % (foo.a, foo.b))
+  """
+
+  def sn_init(self, **kwargs):
+    """The new class's __init__ function."""
+    # First verify the kwargs don't have excess settings.
+    valid_keys = set(self.__slots__)
+    these_keys = set(kwargs.keys())
+    invalid_keys = these_keys - valid_keys
+    if invalid_keys:
+      raise TypeError('invalid keyword arguments for this object: %r' %
+                      invalid_keys)
+
+    # Now initialize this object.
+    for k in valid_keys:
+      setattr(self, k, kwargs.get(k, default_kwargs[k]))
+
+  def sn_repr(self):
+    """The new class's __repr__ function."""
+    return '%s(%s)' % (classname, ', '.join(
+        '%s=%r' % (k, getattr(self, k)) for k in self.__slots__))
+
+  # Give the new class a unique name and then generate the code for it.
+  classname = 'Collection_%s' % classname
+  expr = '\n'.join((
+      'class %(classname)s(object):',
+      '  __slots__ = ["%(slots)s"]',
+  )) % {
+      'classname': classname,
+      'slots': '", "'.join(sorted(default_kwargs)),
+  }
+
+  # Create the class in a local namespace as exec requires.
+  new_class = _CollectionExec(expr, classname)
+
+  # Bind the helpers.
+  new_class.__init__ = sn_init
+  new_class.__repr__ = sn_repr
+
+  return new_class
+# pylint: enable=docstring-misnamed-args
+
+
+def GroupByKey(input_iter, key):
+  """Split an iterable of dicts, based on value of a key.
+
+  GroupByKey([{'a': 1}, {'a': 2}, {'a': 1, 'b': 2}], 'a') =>
+    {1: [{'a': 1}, {'a': 1, 'b': 2}], 2: [{'a': 2}]}
+
+  Args:
+    input_iter: An iterable of dicts.
+    key: A string specifying the key name to split by.
+
+  Returns:
+    A dictionary, mapping from each unique value for |key| that
+    was encountered in |input_iter| to a list of entries that had
+    that value.
+  """
+  split_dict = dict()
+  for entry in input_iter:
+    split_dict.setdefault(entry.get(key), []).append(entry)
+  return split_dict
+
+
+def GroupNamedtuplesByKey(input_iter, key):
+  """Split an iterable of namedtuples, based on value of a key.
+
+  Args:
+    input_iter: An iterable of namedtuples.
+    key: A string specifying the key name to split by.
+
+  Returns:
+    A dictionary, mapping from each unique value for |key| that
+    was encountered in |input_iter| to a list of entries that had
+    that value.
+  """
+  split_dict = {}
+  for entry in input_iter:
+    split_dict.setdefault(getattr(entry, key, None), []).append(entry)
+  return split_dict
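+
+
+# A minimal usage sketch for GroupNamedtuplesByKey with hypothetical build
+# records; collections is imported locally to keep the sketch self-contained.
+def _example_group_namedtuples():
+  """Sketch: group namedtuple records by one of their fields."""
+  import collections
+  Build = collections.namedtuple('Build', ['board', 'status'])
+  builds = [Build('eve', 'pass'), Build('eve', 'fail'), Build('kevin', 'pass')]
+  # Result: {'eve': [the two eve builds], 'kevin': [the one kevin build]}
+  return GroupNamedtuplesByKey(builds, 'board')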
+
+
+def InvertDictionary(origin_dict):
+  """Invert the key value mapping in the origin_dict.
+
+  Given an origin_dict {'key1': {'val1', 'val2'}, 'key2': {'val1', 'val3'},
+  'key3': {'val3'}}, the returned inverted dict will be
+  {'val1': {'key1', 'key2'}, 'val2': {'key1'}, 'val3': {'key2', 'key3'}}
+
+  Args:
+    origin_dict: A dict mapping each key to a group (collection) of values.
+
+  Returns:
+    An inverted dict mapping each value to the set of keys that contained it.
+  """
+  new_dict = {}
+  for origin_key, origin_values in origin_dict.items():
+    for origin_value in origin_values:
+      new_dict.setdefault(origin_value, set()).add(origin_key)
+
+  return new_dict
diff --git a/utils/frozen_chromite/lib/cros_logging.py b/utils/frozen_chromite/lib/cros_logging.py
new file mode 100644
index 0000000..778eef9
--- /dev/null
+++ b/utils/frozen_chromite/lib/cros_logging.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Logging module to be used by all scripts.
+
+cros_logging is a wrapper around logging with additional support for NOTICE
+level. This is to be used instead of the default logging module. The new
+logging level can only be used from here.
+
+The log levels should be used as follows:
+
+DEBUG: Enabled on the CLI with --debug. This is the noisiest logging level.
+Often, as the name suggests, it may contain debugging information you wouldn't
+otherwise need.
+
+INFO: Enabled on the CLI with --verbose. Logging at this level should contain
+relatively fine-grained info about the steps the process is performing, but
+should be light on details (which should be in debug).
+
+NOTICE: The default log level. It should relay a high level overview of what
+the process is doing. It should NOT be a noisy output.
+
+WARNING: Unexpected scenarios that are well handled and do not interrupt the
+process, things like retrying an operation or missing optional information
+needed to complete a portion of a process.
+
+ERROR: Problems that are fatal to a specific operation or script, e.g.
+unable to read a file or invalid arguments.
+
+CRITICAL/FATAL: Rarely needed. These should reflect an extraordinary error that
+might require the shutdown of an application or lead to data loss.
+
+WARNING, ERROR, CRITICAL/FATAL: These levels are always included in the above
+levels as one would expect. Limiting the output of a script to just these log
+levels is rarely desirable, but the --log-level argument can be used to do so.
+"""
+
+from __future__ import print_function
+
+import sys
+# pylint: disable=unused-wildcard-import, wildcard-import
+from logging import *
+# pylint: enable=unused-wildcard-import, wildcard-import
+
+# Have to import shutdown explicitly from logging because it is not included
+# in logging's __all__.
+# pylint: disable=unused-import
+from logging import shutdown
+# pylint: enable=unused-import
+
+# Import as private to avoid polluting module namespace.
+from autotest_lib.utils.frozen_chromite.lib import buildbot_annotations as _annotations
+
+
+# Remove deprecated APIs to force use of new ones.
+del WARN
+del warn
+
+
+# Notice Level.
+NOTICE = 25
+addLevelName(NOTICE, 'NOTICE')
+
+
+# Notice implementation.
+def notice(message, *args, **kwargs):
+  """Log 'msg % args' with severity 'NOTICE'."""
+  log(NOTICE, message, *args, **kwargs)
+
+
+# Only buildbot aware entry-points need to spew buildbot specific logs. Require
+# user action for the special log lines.
+_buildbot_markers_enabled = False
+def EnableBuildbotMarkers():
+  # pylint: disable=global-statement
+  global _buildbot_markers_enabled
+  _buildbot_markers_enabled = True
+
+
+def _PrintForBuildbot(handle, annotation_class, *args):
+  """Log a line for buildbot.
+
+  This function dumps a line to log recognizable by buildbot if
+  EnableBuildbotMarkers has been called. Otherwise, it dumps the same line in a
+  human friendly way that buildbot ignores.
+
+  Args:
+    handle: The pipe to dump the log to. If None, log to sys.stderr.
+    annotation_class: Annotation subclass for the type of buildbot log.
+    *args: The rest of the str arguments to be dumped to the log.
+  """
+  if handle is None:
+    handle = sys.stderr
+  if annotation_class == _annotations.SetEmailNotifyProperty:
+    annotation = annotation_class(*args)
+  else:
+    # Cast each argument, because we end up getting all sorts of objects from
+    # callers.
+    str_args = [str(x) for x in args]
+    annotation = annotation_class(*str_args)
+  if _buildbot_markers_enabled:
+    line = str(annotation)
+  else:
+    line = annotation.human_friendly
+  handle.write('\n' + line + '\n')
+
+
+def PrintBuildbotLink(text, url, handle=None):
+  """Prints out a link to buildbot."""
+  _PrintForBuildbot(handle, _annotations.StepLink, text, url)
+
+
+def PrintKitchenSetBuildProperty(name, data, handle=None):
+  """Prints out a request to set a build property to a JSON value."""
+  _PrintForBuildbot(handle, _annotations.SetBuildProperty, name, data)
+
+
+def PrintKitchenSetEmailNotifyProperty(name, data, handle=None):
+  """Prints out a request to set an email_notify build property."""
+  _PrintForBuildbot(handle, _annotations.SetEmailNotifyProperty, name, data)
+
+
+def PrintBuildbotStepText(text, handle=None):
+  """Prints out stage text to buildbot."""
+  _PrintForBuildbot(handle, _annotations.StepText, text)
+
+
+def PrintBuildbotStepWarnings(handle=None):
+  """Marks a stage as having warnings."""
+  PrintBuildbotStepText('[FAILED BUT FORGIVEN]', handle=handle)
+  # Warnings not supported by LUCI, so working around until re-added.
+  _PrintForBuildbot(handle, _annotations.StepWarnings)
+
+
+def PrintBuildbotStepFailure(handle=None):
+  """Marks a stage as having failures."""
+  _PrintForBuildbot(handle, _annotations.StepFailure)
+
+
+def PrintBuildbotStepName(name, handle=None):
+  """Marks a step name for buildbot to display."""
+  _PrintForBuildbot(handle, _annotations.BuildStep, name)
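+
+
+# A minimal usage sketch, assuming a hypothetical script entry point: emit a
+# NOTICE line and, once markers are enabled, a buildbot link annotation.
+def _example_logging_usage():
+  """Sketch: use the NOTICE level and the buildbot annotation helpers."""
+  notice('Deploying image %s', 'chromiumos_test_image.bin')
+  EnableBuildbotMarkers()
+  PrintBuildbotLink('artifacts', 'https://example.com/logs')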
diff --git a/utils/frozen_chromite/lib/failure_message_lib.py b/utils/frozen_chromite/lib/failure_message_lib.py
new file mode 100644
index 0000000..6270b65
--- /dev/null
+++ b/utils/frozen_chromite/lib/failure_message_lib.py
@@ -0,0 +1,365 @@
+# -*- coding: utf-8 -*-
+# Copyright 2017 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module to manage stage failure messages."""
+
+from __future__ import print_function
+
+import collections
+import json
+import re
+
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+
+# Currently, an exception is reported to CIDB failureTable using the exception
+# class name as the exception_type. failure_message_lib.FailureMessageManager
+# uses the exception_type to decide which StageFailureMessage class to use
+# to rebuild the failure message. Whenever you need to change the names of these
+# classes, please add the new class names to their corresponding type lists,
+# and DO NOT remove the old class names from the type lists.
+# TODO (nxia): instead of using the class name as the exception type when
+# reporting an exception to CIDB, we need to have an attribute like
+# EXCEPTION_CATEGORY (say EXCEPTION_TYPE) and this type cannot be changed or
+# removed from EXCEPTION_TYPE_LIST. But we can add new types to the list.
+BUILD_SCRIPT_FAILURE_TYPES = ('BuildScriptFailure',)
+PACKAGE_BUILD_FAILURE_TYPES = ('PackageBuildFailure',)
+
+
+# These keys must exist as column names from failureView in cidb.
+FAILURE_KEYS = (
+    'id', 'build_stage_id', 'outer_failure_id', 'exception_type',
+    'exception_message', 'exception_category', 'extra_info',
+    'timestamp', 'stage_name', 'board', 'stage_status', 'build_id',
+    'master_build_id', 'builder_name', 'build_number',
+    'build_config', 'build_status', 'important', 'buildbucket_id')
+
+
+# A namedtuple containing values fetched from CIDB failureView.
+_StageFailure = collections.namedtuple('_StageFailure', FAILURE_KEYS)
+
+
+class StageFailure(_StageFailure):
+  """A class presenting values of a failure fetched from CIDB failureView."""
+
+  @classmethod
+  def GetStageFailureFromMessage(cls, stage_failure_message):
+    """Create StageFailure from a StageFailureMessage instance.
+
+    Args:
+      stage_failure_message: An instance of StageFailureMessage.
+
+    Returns:
+      An instance of StageFailure.
+    """
+    return StageFailure(
+        stage_failure_message.failure_id,
+        stage_failure_message.build_stage_id,
+        stage_failure_message.outer_failure_id,
+        stage_failure_message.exception_type,
+        stage_failure_message.exception_message,
+        stage_failure_message.exception_category,
+        stage_failure_message.extra_info, None,
+        stage_failure_message.stage_name, None, None, None, None, None, None,
+        None, None, None, None)
+
+  @classmethod
+  def GetStageFailureFromDicts(cls, failure_dict, stage_dict, build_dict):
+    """Get StageFailure from value dictionaries.
+
+    Args:
+      failure_dict: A dict presenting values of a tuple from failureTable.
+      stage_dict: A dict presenting values of a tuple from buildStageTable.
+      build_dict: A dict presenting values of a tuple from buildTable.
+
+    Returns:
+      An instance of StageFailure.
+    """
+    return StageFailure(
+        failure_dict['id'], failure_dict['build_stage_id'],
+        failure_dict['outer_failure_id'], failure_dict['exception_type'],
+        failure_dict['exception_message'], failure_dict['exception_category'],
+        failure_dict['extra_info'], failure_dict['timestamp'],
+        stage_dict['name'], stage_dict['board'], stage_dict['status'],
+        build_dict['id'], build_dict['master_build_id'],
+        build_dict['builder_name'],
+        build_dict['build_number'], build_dict['build_config'],
+        build_dict['status'], build_dict['important'],
+        build_dict['buildbucket_id'])
+
+
+class StageFailureMessage(object):
+  """Message class contains information of a general stage failure.
+
+  Failed stages report stage failures to CIDB failureTable (see more details
+  in failures_lib.ReportStageFailure). This class constructs a failure
+  message instance from the stage failure information stored in CIDB.
+  """
+
+  def __init__(self, stage_failure, extra_info=None, stage_prefix_name=None):
+    """Construct a StageFailureMessage instance.
+
+    Args:
+      stage_failure: An instance of StageFailure.
+      extra_info: The extra info of the origin failure, default to None.
+      stage_prefix_name: The prefix name (string) of the failed stage,
+        default to None.
+    """
+    self.failure_id = stage_failure.id
+    self.build_stage_id = stage_failure.build_stage_id
+    self.stage_name = stage_failure.stage_name
+    self.exception_type = stage_failure.exception_type
+    self.exception_message = stage_failure.exception_message
+    self.exception_category = stage_failure.exception_category
+    self.outer_failure_id = stage_failure.outer_failure_id
+
+    if extra_info is not None:
+      self.extra_info = extra_info
+    else:
+      # No extra_info provided, decode extra_info from stage_failure.
+      self.extra_info = self._DecodeExtraInfo(stage_failure.extra_info)
+
+    if stage_prefix_name is not None:
+      self.stage_prefix_name = stage_prefix_name
+    else:
+      # No stage_prefix_name provided, extract prefix name from stage_failure.
+      self.stage_prefix_name = self._ExtractStagePrefixName(self.stage_name)
+
+  def __str__(self):
+    return ('[failure id] %s [stage name] %s [stage prefix name] %s '
+            '[exception type] %s [exception category] %s [exception message] %s'
+            ' [extra info] %s' %
+            (self.failure_id, self.stage_name, self.stage_prefix_name,
+             self.exception_type, self.exception_category,
+             self.exception_message, self.extra_info))
+
+  def _DecodeExtraInfo(self, extra_info):
+    """Decode extra info json into dict.
+
+    Args:
+      extra_info: The extra_info of the origin exception, default to None.
+
+    Returns:
+      An empty dict if extra_info is None; extra_info itself if extra_info is
+      a dict; else, load the json string into a dict and return it.
+    """
+    if not extra_info:
+      return {}
+    elif isinstance(extra_info, dict):
+      return extra_info
+    else:
+      try:
+        return json.loads(extra_info)
+      except ValueError as e:
+        logging.error('Cannot decode extra_info: %s', e)
+        return {}
+
+  # TODO(nxia): Force format checking on stage names when they're created
+  def _ExtractStagePrefixName(self, stage_name):
+    """Extract stage prefix name given a full stage name.
+
+    Format examples in our current CIDB buildStageTable:
+      HWTest [bvt-arc] -> HWTest
+      HWTest -> HWTest
+      ImageTest -> ImageTest
+      ImageTest [amd64-generic] -> ImageTest
+      VMTest (attempt 1) -> VMTest
+      VMTest [amd64-generic] (attempt 1) -> VMTest
+
+    Args:
+      stage_name: The full stage name (string) recorded in CIDB.
+
+    Returns:
+      The prefix stage name (string).
+    """
+    pattern = r'([^ ]+)( +\[([^]]+)\])?( +\(([^)]+)\))?'
+    m = re.compile(pattern).match(stage_name)
+    if m is not None:
+      return m.group(1)
+    else:
+      return stage_name
+
+
+class BuildScriptFailureMessage(StageFailureMessage):
+  """Message class contains information of a BuildScriptFailure."""
+
+  def GetShortname(self):
+    """Return the short name (string) of the run command."""
+    return self.extra_info.get('shortname')
+
+
+class PackageBuildFailureMessage(StageFailureMessage):
+  """Message class contains information of a PackagebuildFailure."""
+
+  def GetShortname(self):
+    """Return the short name (string) of the run command."""
+    return self.extra_info.get('shortname')
+
+  def GetFailedPackages(self):
+    """Return a list of packages (strings) that failed to build."""
+    return self.extra_info.get('failed_packages', [])
+
+
+class CompoundFailureMessage(StageFailureMessage):
+  """Message class contains information of a CompoundFailureMessage."""
+
+  def __init__(self, stage_failure, **kwargs):
+    """Construct a CompoundFailureMessage instance.
+
+    Args:
+      stage_failure: An instance of StageFailure.
+      kwargs: Extra message information to pass to StageFailureMessage.
+    """
+    super(CompoundFailureMessage, self).__init__(stage_failure, **kwargs)
+
+    self.inner_failures = []
+
+  def __str__(self):
+    msg_str = super(CompoundFailureMessage, self).__str__()
+
+    for failure in self.inner_failures:
+      msg_str += ('(Inner Stage Failure Message) %s' % str(failure))
+
+    return msg_str
+
+  @staticmethod
+  def GetFailureMessage(failure_message):
+    """Convert a regular failure message instance to CompoundFailureMessage.
+
+    Args:
+      failure_message: An instance of StageFailureMessage.
+
+    Returns:
+      A CompoundFailureMessage instance.
+    """
+    return CompoundFailureMessage(
+        StageFailure.GetStageFailureFromMessage(failure_message),
+        extra_info=failure_message.extra_info,
+        stage_prefix_name=failure_message.stage_prefix_name)
+
+  def HasEmptyList(self):
+    """Check whether the inner failure list is empty.
+
+    Returns:
+      True if self.inner_failures is empty; else, False.
+    """
+    return not bool(self.inner_failures)
+
+  def HasExceptionCategories(self, exception_categories):
+    """Check whether any of the inner failures matches the exception categories.
+
+    Args:
+      exception_categories: A set of exception categories (members of
+        constants.EXCEPTION_CATEGORY_ALL_CATEGORIES).
+
+    Returns:
+      True if any of the inner failures matches a member in
+      exception_categories; else, False.
+    """
+    return any(x.exception_category in exception_categories
+               for x in self.inner_failures)
+
+  def MatchesExceptionCategories(self, exception_categories):
+    """Check whether all of the inner failures matches the exception categories.
+
+    Args:
+      exception_categories: A set of exception categories (members of
+        constants.EXCEPTION_CATEGORY_ALL_CATEGORIES).
+
+    Returns:
+      True if all of the inner failures match a member in
+      exception_categories; else, False.
+    """
+    return (not self.HasEmptyList() and
+            all(x.exception_category in exception_categories
+                for x in self.inner_failures))
+
+
+class FailureMessageManager(object):
+  """Manager class to create a failure message or reconstruct messages."""
+
+  @classmethod
+  def CreateMessage(cls, stage_failure, **kwargs):
+    """Create a failure message instance depending on the exception type.
+
+    Args:
+      stage_failure: An instance of StageFailure.
+      kwargs: Extra message information to pass to StageFailureMessage.
+
+    Returns:
+      A failure message instance of StageFailureMessage class (or its
+        sub-class)
+    """
+    if stage_failure.exception_type in BUILD_SCRIPT_FAILURE_TYPES:
+      return BuildScriptFailureMessage(stage_failure, **kwargs)
+    elif stage_failure.exception_type in PACKAGE_BUILD_FAILURE_TYPES:
+      return PackageBuildFailureMessage(stage_failure, **kwargs)
+    else:
+      return StageFailureMessage(stage_failure, **kwargs)
+
+  @classmethod
+  def ReconstructMessages(cls, failure_messages):
+    """Reconstruct failure messages by nesting messages.
+
+    A failure message with a non-None outer_failure_id is an inner failure of
+    its outer failure message (the one whose failure_id == outer_failure_id).
+    This method takes a list of failure messages and reconstructs the list by
+    1) converting each outer failure message into a CompoundFailureMessage
+    instance and 2) inserting the inner failure messages into the
+    inner_failures list of their outer failure messages. CompoundFailures in
+    CIDB aren't nested (see failures_lib.ReportStageFailure), so there isn't
+    another inner failure list layer in an inner failure message and there are
+    no circular dependencies.
+
+    For example, given failure_messages list
+      [A(failure_id=1),
+       B(failure_id=2, outer_failure_id=1),
+       C(failure_id=3, outer_failure_id=1),
+       D(failure_id=4),
+       E(failure_id=5, outer_failure_id=4),
+       F(failure_id=6)]
+    this method returns a reconstructed list:
+      [A(failure_id=1, inner_failures=[B(failure_id=2, outer_failure_id=1),
+                                       C(failure_id=3, outer_failure_id=1)]),
+       D(failure_id=4, inner_failures=[E(failure_id=5, outer_failure_id=4)]),
+       F(failure_id=6)]
+
+    Args:
+      failure_messages: A list of failure message instances, not yet nested.
+
+    Returns:
+      A list of failure message instances of StageFailureMessage class (or its
+        sub-class). Failure messages with a non-None outer_failure_id are nested
+        into the inner_failures list of their outer failure messages.
+    """
+    failure_message_dict = {x.failure_id: x for x in failure_messages}
+
+    for failure in failure_messages:
+      if failure.outer_failure_id is not None:
+        assert failure.outer_failure_id in failure_message_dict
+        outer_failure = failure_message_dict[failure.outer_failure_id]
+        if not isinstance(outer_failure, CompoundFailureMessage):
+          outer_failure = CompoundFailureMessage.GetFailureMessage(
+              outer_failure)
+          failure_message_dict[outer_failure.failure_id] = outer_failure
+
+        outer_failure.inner_failures.append(failure)
+        del failure_message_dict[failure.failure_id]
+
+    return list(failure_message_dict.values())
+
+  @classmethod
+  def ConstructStageFailureMessages(cls, stage_failures):
+    """Construct stage failure messages from failure entries from CIDB.
+
+    Args:
+      stage_failures: A list of StageFailure instances.
+
+    Returns:
+      A list of stage failure message instances of StageFailureMessage class
+      (or its sub-class). See return type of ReconstructMessages().
+    """
+    failure_messages = [cls.CreateMessage(f) for f in stage_failures]
+
+    return cls.ReconstructMessages(failure_messages)
diff --git a/utils/frozen_chromite/lib/failures_lib.py b/utils/frozen_chromite/lib/failures_lib.py
new file mode 100644
index 0000000..076cde5
--- /dev/null
+++ b/utils/frozen_chromite/lib/failures_lib.py
@@ -0,0 +1,481 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Classes of failure types."""
+
+from __future__ import print_function
+
+import collections
+import json
+import sys
+import traceback
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import failure_message_lib
+from autotest_lib.utils.frozen_chromite.lib import metrics
+
+
+class StepFailure(Exception):
+  """StepFailure exceptions indicate that a cbuildbot step failed.
+
+  Exceptions that derive from StepFailure should meet the following
+  criteria:
+    1) The failure indicates that a cbuildbot step failed.
+    2) The necessary information to debug the problem has already been
+       printed in the logs for the stage that failed.
+    3) __str__() should be brief enough to include in a Commit Queue
+       failure message.
+  """
+
+  # The constants.EXCEPTION_CATEGORY_ALL_CATEGORIES values that this exception
+  # maps to. Subclasses should redefine this class constant to map to a
+  # different category.
+  EXCEPTION_CATEGORY = constants.EXCEPTION_CATEGORY_UNKNOWN
+
+  def EncodeExtraInfo(self):
+    """Encode extra_info into a json string, can be overwritten by subclasses"""
+
+  def ConvertToStageFailureMessage(self, build_stage_id, stage_name,
+                                   stage_prefix_name=None):
+    """Convert StepFailure to StageFailureMessage.
+
+    Args:
+      build_stage_id: The id of the build stage.
+      stage_name: The name (string) of the failed stage.
+      stage_prefix_name: The prefix name (string) of the failed stage,
+          default to None.
+
+    Returns:
+      An instance of failure_message_lib.StageFailureMessage.
+    """
+    stage_failure = failure_message_lib.StageFailure(
+        None, build_stage_id, None, self.__class__.__name__, str(self),
+        self.EXCEPTION_CATEGORY, self.EncodeExtraInfo(), None, stage_name,
+        None, None, None, None, None, None, None, None, None, None)
+    return failure_message_lib.StageFailureMessage(
+        stage_failure, stage_prefix_name=stage_prefix_name)
+
+
+# A namedtuple to hold information of an exception.
+ExceptInfo = collections.namedtuple(
+    'ExceptInfo', ['type', 'str', 'traceback'])
+
+
+def CreateExceptInfo(exception, tb):
+  """Creates a list of ExceptInfo objects from |exception| and |tb|.
+
+  Creates an ExceptInfo object from |exception| and |tb|. If
+  |exception| is a CompoundFailure with a non-empty list of exc_infos,
+  simply returns exception.exc_infos. Note that we do not preserve the type
+  of |exception| in this case.
+
+  Args:
+    exception: The exception.
+    tb: The textual traceback.
+
+  Returns:
+    A list of ExceptInfo objects.
+  """
+  if isinstance(exception, CompoundFailure) and exception.exc_infos:
+    return exception.exc_infos
+
+  return [ExceptInfo(exception.__class__, str(exception), tb)]
+
+
+class CompoundFailure(StepFailure):
+  """An exception that contains a list of ExceptInfo objects."""
+
+  def __init__(self, message='', exc_infos=None):
+    """Initializes an CompoundFailure instance.
+
+    Args:
+      message: A string describing the failure.
+      exc_infos: A list of ExceptInfo objects.
+    """
+    self.exc_infos = exc_infos if exc_infos else []
+    if not message:
+      # By default, print all stored ExceptInfo objects. This is the
+      # preferred behavior because we'd always have the full
+      # tracebacks to debug the failure.
+      message = '\n'.join('{e.type}: {e.str}\n{e.traceback}'.format(e=ex)
+                          for ex in self.exc_infos)
+    self.msg = message
+
+    super(CompoundFailure, self).__init__(message)
+
+  def ToSummaryString(self):
+    """Returns a string with type and string of each ExceptInfo object.
+
+    This does not include the textual tracebacks on purpose, so the
+    message is more readable on the waterfall.
+    """
+    if self.HasEmptyList():
+      # Fall back to return self.message if list is empty.
+      return self.msg
+    else:
+      return '\n'.join(['%s: %s' % (e.type, e.str) for e in self.exc_infos])
+
+  def HasEmptyList(self):
+    """Returns True if self.exc_infos is empty."""
+    return not bool(self.exc_infos)
+
+  def HasFailureType(self, cls):
+    """Returns True if any of the failures matches |cls|."""
+    return any(issubclass(x.type, cls) for x in self.exc_infos)
+
+  def MatchesFailureType(self, cls):
+    """Returns True if all failures matches |cls|."""
+    return (not self.HasEmptyList() and
+            all(issubclass(x.type, cls) for x in self.exc_infos))
+
+  def HasFatalFailure(self, whitelist=None):
+    """Determine if there are non-whitlisted failures.
+
+    Args:
+      whitelist: A list of whitelisted exception types.
+
+    Returns:
+      Returns True if any failure is not in |whitelist|.
+    """
+    if not whitelist:
+      return not self.HasEmptyList()
+
+    for ex in self.exc_infos:
+      if all(not issubclass(ex.type, cls) for cls in whitelist):
+        return True
+
+    return False
+
+  def ConvertToStageFailureMessage(self, build_stage_id, stage_name,
+                                   stage_prefix_name=None):
+    """Convert CompoundFailure to StageFailureMessage.
+
+    Args:
+      build_stage_id: The id of the build stage.
+      stage_name: The name (string) of the failed stage.
+      stage_prefix_name: The prefix name (string) of the failed stage,
+          default to None.
+
+    Returns:
+      An instance of failure_message_lib.StageFailureMessage.
+    """
+    stage_failure = failure_message_lib.StageFailure(
+        None, build_stage_id, None, self.__class__.__name__, str(self),
+        self.EXCEPTION_CATEGORY, self.EncodeExtraInfo(), None, stage_name,
+        None, None, None, None, None, None, None, None, None, None)
+    compound_failure_message = failure_message_lib.CompoundFailureMessage(
+        stage_failure, stage_prefix_name=stage_prefix_name)
+
+    for exc_class, exc_str, _ in self.exc_infos:
+      inner_failure = failure_message_lib.StageFailure(
+          None, build_stage_id, None, exc_class.__name__, exc_str,
+          _GetExceptionCategory(exc_class), None, None, stage_name,
+          None, None, None, None, None, None, None, None, None, None)
+      innner_failure_message = failure_message_lib.StageFailureMessage(
+          inner_failure, stage_prefix_name=stage_prefix_name)
+      compound_failure_message.inner_failures.append(innner_failure_message)
+
+    return compound_failure_message
+
+
+class ExitEarlyException(Exception):
+  """Exception when a stage finishes and exits early."""
+
+# ExitEarlyException simulates sys.exit(0).  Like SystemExit (which derives
+# from BaseException), it should not be caught as a generic Exception and
+# have its type reset when re-raised.
+EXCEPTIONS_TO_EXCLUDE = (ExitEarlyException,)
+
+class SetFailureType(object):
+  """A wrapper to re-raise the exception as the pre-set type."""
+
+  def __init__(self, category_exception, source_exception=None,
+               exclude_exceptions=EXCEPTIONS_TO_EXCLUDE):
+    """Initializes the decorator.
+
+    Args:
+      category_exception: The exception type to re-raise as. It must be
+        a subclass of CompoundFailure.
+      source_exception: The exception types to re-raise. By default, re-raise
+        all Exception classes.
+      exclude_exceptions: Do not set the type of the exception if it is a
+        subclass of one of the exceptions in exclude_exceptions. Defaults to
+        EXCEPTIONS_TO_EXCLUDE.
+    """
+    assert issubclass(category_exception, CompoundFailure)
+    self.category_exception = category_exception
+    self.source_exception = source_exception
+    if self.source_exception is None:
+      self.source_exception = Exception
+    self.exclude_exceptions = exclude_exceptions
+
+  def __call__(self, functor):
+    """Returns a wrapped function."""
+    def wrapped_functor(*args, **kwargs):
+      try:
+        return functor(*args, **kwargs)
+      except self.source_exception:
+        # Get the information about the original exception.
+        exc_type, exc_value, _ = sys.exc_info()
+        exc_traceback = traceback.format_exc()
+        if self.exclude_exceptions is not None:
+          for exclude_exception in self.exclude_exceptions:
+            if issubclass(exc_type, exclude_exception):
+              raise
+        if issubclass(exc_type, self.category_exception):
+          # Do not re-raise if the exception is a subclass of the set
+          # exception type because it offers more information.
+          raise
+        else:
+          exc_infos = CreateExceptInfo(exc_value, exc_traceback)
+          raise self.category_exception(exc_infos=exc_infos)
+
+    return wrapped_functor
+
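+# A minimal usage sketch (not part of the original chromite code): the
+# SetFailureType wrapper is typically applied as a decorator so that arbitrary
+# exceptions raised by a step are re-raised as the given CompoundFailure
+# subclass. The function name below is hypothetical.
+#
+#   @SetFailureType(InfrastructureFailure)
+#   def SyncSources():
+#     raise IOError('network flake')  # re-raised as InfrastructureFailure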
+
+class RetriableStepFailure(StepFailure):
+  """This exception is thrown when a step failed, but should be retried."""
+
+
+# TODO(nxia): Every time the class name is changed, add the new class name to
+# BUILD_SCRIPT_FAILURE_TYPES.
+class BuildScriptFailure(StepFailure):
+  """This exception is thrown when a build command failed.
+
+  It is intended to provide a shorter summary of what command failed,
+  for usage in failure messages from the Commit Queue, so as to ensure
+  that developers aren't spammed with giant error messages when common
+  commands (e.g. build_packages) fail.
+  """
+
+  EXCEPTION_CATEGORY = constants.EXCEPTION_CATEGORY_BUILD
+
+  def __init__(self, exception, shortname):
+    """Construct a BuildScriptFailure object.
+
+    Args:
+      exception: A RunCommandError object.
+      shortname: Short name for the command we're running.
+    """
+    StepFailure.__init__(self)
+    assert isinstance(exception, cros_build_lib.RunCommandError)
+    self.exception = exception
+    self.shortname = shortname
+    self.args = (exception, shortname)
+
+  def __str__(self):
+    """Summarize a build command failure briefly."""
+    result = self.exception.result
+    if result.returncode:
+      return '%s failed (code=%s)' % (self.shortname, result.returncode)
+    else:
+      return self.exception.msg
+
+  def EncodeExtraInfo(self):
+    """Encode extra_info into a json string.
+
+    Returns:
+      A json string containing shortname.
+    """
+    extra_info_dict = {
+        'shortname': self.shortname,
+    }
+    return json.dumps(extra_info_dict)
+
+
+# TODO(nxia): Every time the class name is changed, add the new class name to
+# PACKAGE_BUILD_FAILURE_TYPES
+class PackageBuildFailure(BuildScriptFailure):
+  """This exception is thrown when packages fail to build."""
+
+  def __init__(self, exception, shortname, failed_packages):
+    """Construct a PackageBuildFailure object.
+
+    Args:
+      exception: The underlying exception.
+      shortname: Short name for the command we're running.
+      failed_packages: List of packages that failed to build.
+    """
+    BuildScriptFailure.__init__(self, exception, shortname)
+    self.failed_packages = set(failed_packages)
+    self.args = (exception, shortname, failed_packages)
+
+  def __str__(self):
+    return ('Packages failed in %s: %s'
+            % (self.shortname, ' '.join(sorted(self.failed_packages))))
+
+  def EncodeExtraInfo(self):
+    """Encode extra_info into a json string.
+
+    Returns:
+      A json string containing shortname and failed_packages.
+    """
+    extra_info_dict = {
+        'shortname': self.shortname,
+        'failed_packages': list(self.failed_packages)
+    }
+    return json.dumps(extra_info_dict)
+
+  def BuildCompileFailureOutputJson(self):
+    """Build proto BuildCompileFailureOutput compatible JSON output.
+
+    Returns:
+      A json string with BuildCompileFailureOutput proto as json.
+    """
+    failures = []
+    for pkg in self.failed_packages:
+      failures.append({'rule': 'emerge', 'output_targets': pkg})
+    wrapper = {'failures': failures}
+    return json.dumps(wrapper, indent=2)
+
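+# An illustrative sketch (not part of the original chromite code) of the JSON
+# produced by PackageBuildFailure.BuildCompileFailureOutputJson(); the package
+# names below are hypothetical:
+#
+#   {
+#     "failures": [
+#       {"rule": "emerge", "output_targets": "chromeos-base/foo"},
+#       {"rule": "emerge", "output_targets": "chromeos-base/bar"}
+#     ]
+#   }
+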
+class InfrastructureFailure(CompoundFailure):
+  """Raised if a stage fails due to infrastructure issues."""
+
+  EXCEPTION_CATEGORY = constants.EXCEPTION_CATEGORY_INFRA
+
+
+# Chrome OS Test Lab failures.
+class TestLabFailure(InfrastructureFailure):
+  """Raised if a stage fails due to hardware lab infrastructure issues."""
+
+  EXCEPTION_CATEGORY = constants.EXCEPTION_CATEGORY_LAB
+
+
+class SuiteTimedOut(TestLabFailure):
+  """Raised if a test suite timed out with no test failures."""
+
+
+class BoardNotAvailable(TestLabFailure):
+  """Raised if the board is not available in the lab."""
+
+
+class SwarmingProxyFailure(TestLabFailure):
+  """Raised when error related to swarming proxy occurs."""
+
+
+# Gerrit-on-Borg failures.
+class GoBFailure(InfrastructureFailure):
+  """Raised if a stage fails due to Gerrit-on-Borg (GoB) issues."""
+
+
+class GoBQueryFailure(GoBFailure):
+  """Raised if a stage fails due to Gerrit-on-Borg (GoB) query errors."""
+
+
+class GoBSubmitFailure(GoBFailure):
+  """Raised if a stage fails due to Gerrit-on-Borg (GoB) submission errors."""
+
+
+class GoBFetchFailure(GoBFailure):
+  """Raised if a stage fails due to Gerrit-on-Borg (GoB) fetch errors."""
+
+
+# Google Storage failures.
+class GSFailure(InfrastructureFailure):
+  """Raised if a stage fails due to Google Storage (GS) issues."""
+
+
+class GSUploadFailure(GSFailure):
+  """Raised if a stage fails due to Google Storage (GS) upload issues."""
+
+
+class GSDownloadFailure(GSFailure):
+  """Raised if a stage fails due to Google Storage (GS) download issues."""
+
+
+# Builder failures.
+class BuilderFailure(InfrastructureFailure):
+  """Raised if a stage fails due to builder issues."""
+
+
+class MasterSlaveVersionMismatchFailure(BuilderFailure):
+  """Raised if a slave build has a different full_version than its master."""
+
+# Crash collection service failures.
+class CrashCollectionFailure(InfrastructureFailure):
+  """Raised if a stage fails due to crash collection services."""
+
+
+class TestFailure(StepFailure):
+  """Raised if a test stage (e.g. VMTest) fails."""
+
+  EXCEPTION_CATEGORY = constants.EXCEPTION_CATEGORY_TEST
+
+
+class TestWarning(StepFailure):
+  """Raised if a test stage (e.g. VMTest) returns a warning code."""
+
+
+def ReportStageFailure(exception, metrics_fields=None):
+  """Reports stage failure to Mornach along with inner exceptions.
+
+  Args:
+    exception: The failure exception to report.
+    metrics_fields: (Optional) Fields for ts_mon metric.
+  """
+  _InsertFailureToMonarch(
+      exception_category=_GetExceptionCategory(type(exception)),
+      metrics_fields=metrics_fields)
+
+  # This assumes that CompoundFailure can't be nested.
+  if isinstance(exception, CompoundFailure):
+    for exc_class, _, _ in exception.exc_infos:
+      _InsertFailureToMonarch(
+          exception_category=_GetExceptionCategory(exc_class),
+          metrics_fields=metrics_fields)
+
+
+def _InsertFailureToMonarch(
+    exception_category=constants.EXCEPTION_CATEGORY_UNKNOWN,
+    metrics_fields=None):
+  """Report a single stage failure to Mornach if needed.
+
+  Args:
+    exception_category: (Optional) One of
+                        constants.EXCEPTION_CATEGORY_ALL_CATEGORIES.
+                        Defaults to 'unknown'.
+    metrics_fields: (Optional) Fields for ts_mon metric.
+  """
+  if (metrics_fields is not None and
+      exception_category != constants.EXCEPTION_CATEGORY_UNKNOWN):
+    counter = metrics.Counter(constants.MON_STAGE_FAILURE_COUNT)
+    metrics_fields['exception_category'] = exception_category
+    counter.increment(fields=metrics_fields)
+
+
+def GetStageFailureMessageFromException(stage_name, build_stage_id,
+                                        exception, stage_prefix_name=None):
+  """Get StageFailureMessage from an exception.
+
+  Args:
+    stage_name: The name (string) of the failed stage.
+    build_stage_id: The id of the failed build stage.
+    exception: The BaseException instance to convert to StageFailureMessage.
+    stage_prefix_name: The prefix name (string) of the failed stage,
+        defaults to None.
+
+  Returns:
+    An instance of failure_message_lib.StageFailureMessage.
+  """
+  if isinstance(exception, StepFailure):
+    return exception.ConvertToStageFailureMessage(
+        build_stage_id, stage_name, stage_prefix_name=stage_prefix_name)
+  else:
+    stage_failure = failure_message_lib.StageFailure(
+        None, build_stage_id, None, type(exception).__name__, str(exception),
+        _GetExceptionCategory(type(exception)), None, None, stage_name,
+        None, None, None, None, None, None, None, None, None, None)
+
+    return failure_message_lib.StageFailureMessage(
+        stage_failure, stage_prefix_name=stage_prefix_name)
+
+
+def _GetExceptionCategory(exception_class):
+  # Do not use try/except. If a subclass of StepFailure does not have a valid
+  # EXCEPTION_CATEGORY, it is a programming error, not a runtime error.
+  if issubclass(exception_class, StepFailure):
+    return exception_class.EXCEPTION_CATEGORY
+  else:
+    return constants.EXCEPTION_CATEGORY_UNKNOWN
diff --git a/utils/frozen_chromite/lib/gce.py b/utils/frozen_chromite/lib/gce.py
new file mode 100644
index 0000000..f7bdac9
--- /dev/null
+++ b/utils/frozen_chromite/lib/gce.py
@@ -0,0 +1,714 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A convinient wrapper of the GCE python API.
+
+Public methods in class GceContext raise HttpError when the underlining call to
+Google API fails, or gce.Error on other failures.
+"""
+
+from __future__ import print_function
+
+from googleapiclient.discovery import build
+from googleapiclient.errors import HttpError
+from googleapiclient.http import HttpRequest
+import httplib2
+from oauth2client.client import GoogleCredentials
+
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
+
+
+class Error(Exception):
+  """Base exception for this module."""
+
+
+class ResourceNotFoundError(Error):
+  """Exceptions raised when requested GCE resource was not found."""
+
+
+class RetryOnServerErrorHttpRequest(HttpRequest):
+  """A HttpRequest that will be retried on server errors automatically."""
+
+  def __init__(self, num_retries, *args, **kwargs):
+    """Constructor for RetryOnServerErrorHttpRequest."""
+    self.num_retries = num_retries
+    super(RetryOnServerErrorHttpRequest, self).__init__(*args, **kwargs)
+
+  def execute(self, http=None, num_retries=None):
+    """Excutes a RetryOnServerErrorHttpRequest.
+
+    HttpRequest.execute() has the option of automatically retrying on server
+    errors, i.e., 500 status codes. Calling it with a non-zero value of
+    |num_retries| will cause failed requests to be retried.
+
+    Args:
+      http: The httplib2.http to send this request through.
+      num_retries: Number of retries. Class default value will be used if
+          omitted.
+
+    Returns:
+      A deserialized object model of the response body as determined
+          by the postproc. See HttpRequest.execute().
+    """
+    return super(RetryOnServerErrorHttpRequest, self).execute(
+        http=http, num_retries=num_retries or self.num_retries)
+
+
+def _GetMetdataValue(metadata, key):
+  """Finds a value corresponding to a given metadata key.
+
+  Args:
+    metadata: metadata object, i.e. a dict containing 'items'
+      - a list of key-value pairs.
+    key: name of the key.
+
+  Returns:
+    Corresponding value or None if it was not found.
+  """
+  for item in metadata['items']:
+    if item['key'] == key:
+      return item['value']
+  return None
+
+
+def _UpdateMetadataValue(metadata, key, value):
+  """Updates a single key-value pair in a metadata object.
+
+  Args:
+    metadata: metadata object, i.e. a dict containing 'items'
+      - a list of key-value pairs.
+    key: name of the key.
+    value: new value for the key, or None if it should be removed.
+  """
+  items = metadata.setdefault('items', [])
+  for item in items:
+    if item['key'] == key:
+      if value is None:
+        items.remove(item)
+      else:
+        item['value'] = value
+      return
+
+  if value is not None:
+    items.append({
+        'key': key,
+        'value': value,
+    })
+
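+# An illustrative sketch (not part of the original chromite code) of the GCE
+# metadata format these helpers operate on; the keys and values below are
+# hypothetical:
+#
+#   metadata = {'items': [{'key': 'ssh-keys', 'value': 'user:ssh-rsa AAAA'}]}
+#   _UpdateMetadataValue(metadata, 'enable-oslogin', 'TRUE')  # appends the pair
+#   _GetMetdataValue(metadata, 'enable-oslogin')              # returns 'TRUE'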
+
+class GceContext(object):
+  """A convinient wrapper around the GCE Python API."""
+
+  # These constants are made public so that users can customize as they need.
+  DEFAULT_TIMEOUT_SEC = 5 * 60
+  INSTANCE_OPERATIONS_TIMEOUT_SEC = 10 * 60
+  IMAGE_OPERATIONS_TIMEOUT_SEC = 10 * 60
+
+  _GCE_SCOPES = (
+      'https://www.googleapis.com/auth/compute',  # CreateInstance, CreateImage
+      'https://www.googleapis.com/auth/devstorage.full_control', # CreateImage
+  )
+  _DEFAULT_NETWORK = 'default'
+  _DEFAULT_MACHINE_TYPE = 'n1-standard-8'
+
+  # Project default service account and scopes.
+  _DEFAULT_SERVICE_ACCOUNT_EMAIL = 'default'
+  # The list is in line with what the gcloud cli uses.
+  # https://cloud.google.com/sdk/gcloud/reference/compute/instances/create
+  _DEFAULT_INSTANCE_SCOPES = [
+      'https://www.googleapis.com/auth/cloud.useraccounts.readonly',
+      'https://www.googleapis.com/auth/devstorage.read_only',
+      'https://www.googleapis.com/auth/logging.write',
+  ]
+
+  # This is made public to allow easy customization of the retry behavior.
+  RETRIES = 2
+
+  def __init__(self, project, zone, credentials, thread_safe=False):
+    """Initializes GceContext.
+
+    Args:
+      project: The GCP project to create instances in.
+      zone: The default zone to create instances in.
+      credentials: The credentials used to call the GCE API.
+      thread_safe: Whether the client is expected to be thread safe.
+    """
+    self.project = project
+    self.zone = zone
+
+    def _BuildRequest(http, *args, **kwargs):
+      """Custom request builder."""
+      return self._BuildRetriableRequest(self.RETRIES, http, thread_safe,
+                                         credentials, *args, **kwargs)
+
+    self.gce_client = build('compute', 'v1', credentials=credentials,
+                            requestBuilder=_BuildRequest)
+
+    self.region = self.GetZoneRegion(zone)
+
+  @classmethod
+  def ForServiceAccount(cls, project, zone, json_key_file):
+    """Creates a GceContext using service account credentials.
+
+    About service account:
+    https://developers.google.com/api-client-library/python/auth/service-accounts
+
+    Args:
+      project: The GCP project to create images and instances in.
+      zone: The default zone to create instances in.
+      json_key_file: Path to the service account JSON key.
+
+    Returns:
+      GceContext.
+    """
+    credentials = GoogleCredentials.from_stream(json_key_file).create_scoped(
+        cls._GCE_SCOPES)
+    return GceContext(project, zone, credentials)
+
+  @classmethod
+  def ForServiceAccountThreadSafe(cls, project, zone, json_key_file):
+    """Creates a thread-safe GceContext using service account credentials.
+
+    About service account:
+    https://developers.google.com/api-client-library/python/auth/service-accounts
+
+    Args:
+      project: The GCP project to create images and instances in.
+      zone: The default zone to create instances in.
+      json_key_file: Path to the service account JSON key.
+
+    Returns:
+      GceContext.
+    """
+    credentials = GoogleCredentials.from_stream(json_key_file).create_scoped(
+        cls._GCE_SCOPES)
+    return GceContext(project, zone, credentials, thread_safe=True)
+
+  def CreateAddress(self, name, region=None):
+    """Reserves an external IP address.
+
+    Args:
+      name: The name to assign to the address.
+      region: Region to reserve the address in.
+
+    Returns:
+      The reserved address as a string.
+    """
+    body = {
+        'name': name,
+    }
+    operation = self.gce_client.addresses().insert(
+        project=self.project,
+        region=region or self.region,
+        body=body).execute()
+    self._WaitForRegionOperation(
+        operation['name'], region,
+        timeout_sec=self.INSTANCE_OPERATIONS_TIMEOUT_SEC)
+
+    address = self.gce_client.addresses().get(
+        project=self.project,
+        region=region or self.region,
+        address=name).execute()
+
+    return address['address']
+
+  def DeleteAddress(self, name, region=None):
+    """Frees up an external IP address.
+
+    Args:
+      name: The name of the address.
+      region: Region of the address.
+    """
+    operation = self.gce_client.addresses().delete(
+        project=self.project,
+        region=region or self.region,
+        address=name).execute()
+    self._WaitForRegionOperation(
+        operation['name'], region=region or self.region,
+        timeout_sec=self.INSTANCE_OPERATIONS_TIMEOUT_SEC)
+
+  def GetZoneRegion(self, zone=None):
+    """Resolves name of the region that a zone belongs to.
+
+    Args:
+      zone: The zone to resolve.
+
+    Returns:
+      Name of the region corresponding to the zone.
+    """
+    zone_resource = self.gce_client.zones().get(
+        project=self.project,
+        zone=zone or self.zone).execute()
+    return zone_resource['region'].split('/')[-1]
+
+  def CreateInstance(self, name, image, zone=None, network=None, subnet=None,
+                     machine_type=None, default_scopes=True,
+                     static_address=None, **kwargs):
+    """Creates an instance with the given image and waits until it's ready.
+
+    Args:
+      name: Instance name.
+      image: Fully spelled URL of the image, e.g., for private images,
+          'global/images/my-private-image', or for images from a
+          publicly-available project,
+          'projects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD'.
+          Details:
+          https://cloud.google.com/compute/docs/reference/latest/instances/insert
+      zone: The zone to create the instance in. Default zone will be used if
+          omitted.
+      network: An existing network to create the instance in. Default network
+          will be used if omitted.
+      subnet: The subnet to create the instance in.
+      machine_type: The machine type to use. Default machine type will be used
+          if omitted.
+      default_scopes: If true, the default scopes are added to the instance.
+      static_address: External IP address to assign to the instance as a string.
+          If None, an ephemeral address will be used.
+      kwargs: Other possible Instance Resource properties.
+          https://cloud.google.com/compute/docs/reference/latest/instances#resource
+          Note that values from kwargs will override properties constructed from
+          positional arguments, i.e., name, image, zone, network and
+          machine_type.
+
+    Returns:
+      URL to the created instance.
+    """
+    logging.info('Creating instance "%s" with image "%s" ...', name, image)
+    network = 'global/networks/%s' % (network or self._DEFAULT_NETWORK)
+    machine_type = 'zones/%s/machineTypes/%s' % (
+        zone or self.zone, machine_type or self._DEFAULT_MACHINE_TYPE)
+    service_accounts = (
+        {
+            'email': self._DEFAULT_SERVICE_ACCOUNT_EMAIL,
+            'scopes': self._DEFAULT_INSTANCE_SCOPES,
+        },
+    ) if default_scopes else ()
+
+    config = {
+        'name': name,
+        'machineType': machine_type,
+        'disks': (
+            {
+                'boot': True,
+                'autoDelete': True,
+                'initializeParams': {
+                    'sourceImage': image,
+                },
+            },
+        ),
+        'networkInterfaces': (
+            {
+                'network': network,
+                'accessConfigs': (
+                    {
+                        'type': 'ONE_TO_ONE_NAT',
+                        'name': 'External NAT',
+                    },
+                ),
+            },
+        ),
+        'serviceAccounts': service_accounts,
+    }
+    config.update(**kwargs)
+    if static_address is not None:
+      config['networkInterfaces'][0]['accessConfigs'][0]['natIP'] = (
+          static_address)
+    if subnet is not None:
+      region = self.GetZoneRegion(zone)
+      config['networkInterfaces'][0]['subnetwork'] = (
+          'regions/%s/subnetworks/%s' % (region, subnet)
+      )
+    operation = self.gce_client.instances().insert(
+        project=self.project,
+        zone=zone or self.zone,
+        body=config).execute()
+    self._WaitForZoneOperation(
+        operation['name'],
+        timeout_sec=self.INSTANCE_OPERATIONS_TIMEOUT_SEC,
+        timeout_handler=lambda: self.DeleteInstance(name))
+    return operation['targetLink']
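+
+  # A hedged usage sketch (not part of the original chromite code); the
+  # project, zone, key file, image and instance names below are hypothetical:
+  #
+  #   gce = GceContext.ForServiceAccount('my-project', 'us-central1-a',
+  #                                      '/path/to/service_account.json')
+  #   gce.CreateInstance('builder-1', 'global/images/my-private-image')
+  #   gce.DeleteInstance('builder-1')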
+
+  def DeleteInstance(self, name, zone=None):
+    """Deletes an instance with the name and waits until it's done.
+
+    Args:
+      name: Name of the instance to delete.
+      zone: Zone the instance is in. Default zone will be used if omitted.
+    """
+    logging.info('Deleting instance "%s" ...', name)
+    operation = self.gce_client.instances().delete(
+        project=self.project,
+        zone=zone or self.zone,
+        instance=name).execute()
+    self._WaitForZoneOperation(
+        operation['name'], timeout_sec=self.INSTANCE_OPERATIONS_TIMEOUT_SEC)
+
+  def StartInstance(self, name, zone=None):
+    """Starts an instance with the name and waits until it's done.
+
+    Args:
+      name: Name of the instance to start.
+      zone: Zone the instance is in. Default zone will be used if omitted.
+    """
+    logging.info('Starting instance "%s" ...', name)
+    operation = self.gce_client.instances().start(
+        project=self.project,
+        zone=zone or self.zone,
+        instance=name).execute()
+    self._WaitForZoneOperation(
+        operation['name'], timeout_sec=self.INSTANCE_OPERATIONS_TIMEOUT_SEC)
+
+  def StopInstance(self, name, zone=None):
+    """Stops an instance with the name and waits until it's done.
+
+    Args:
+      name: Name of the instance to stop.
+      zone: Zone the instance is in. Default zone will be used if omitted.
+    """
+    logging.info('Stopping instance "%s" ...', name)
+    operation = self.gce_client.instances().stop(
+        project=self.project,
+        zone=zone or self.zone,
+        instance=name).execute()
+    self._WaitForZoneOperation(
+        operation['name'], timeout_sec=self.INSTANCE_OPERATIONS_TIMEOUT_SEC)
+
+  def CreateImage(self, name, source):
+    """Creates an image with the given |source|.
+
+    Args:
+      name: Name of the image to be created.
+      source:
+        Google Cloud Storage object of the source disk, e.g.,
+        'https://storage.googleapis.com/my-gcs-bucket/test_image.tar.gz'.
+
+    Returns:
+      URL to the created image.
+    """
+    logging.info('Creating image "%s" with source "%s" ...', name, source)
+    config = {
+        'name': name,
+        'rawDisk': {
+            'source': source,
+        },
+    }
+    operation = self.gce_client.images().insert(
+        project=self.project,
+        body=config).execute()
+    self._WaitForGlobalOperation(operation['name'],
+                                 timeout_sec=self.IMAGE_OPERATIONS_TIMEOUT_SEC,
+                                 timeout_handler=lambda: self.DeleteImage(name))
+    return operation['targetLink']
+
+  def DeleteImage(self, name):
+    """Deletes an image and waits until it's deleted.
+
+    Args:
+      name: Name of the image to delete.
+    """
+    logging.info('Deleting image "%s" ...', name)
+    operation = self.gce_client.images().delete(
+        project=self.project,
+        image=name).execute()
+    self._WaitForGlobalOperation(operation['name'],
+                                 timeout_sec=self.IMAGE_OPERATIONS_TIMEOUT_SEC)
+
+  def ListInstances(self, zone=None):
+    """Lists all instances.
+
+    Args:
+      zone: Zone the instances are in. Default zone will be used if
+            omitted.
+
+    Returns:
+      A list of Instance Resources if found, or an empty list otherwise.
+    """
+    result = self.gce_client.instances().list(project=self.project,
+                                              zone=zone or self.zone).execute()
+    return result.get('items', [])
+
+  def ListImages(self):
+    """Lists all images.
+
+    Returns:
+      A list of Image Resources if found, or an empty list otherwise.
+    """
+    result = self.gce_client.images().list(project=self.project).execute()
+    return result.get('items', [])
+
+  def GetInstance(self, instance, zone=None):
+    """Gets an Instance Resource by name and zone.
+
+    Args:
+      instance: Name of the instance.
+      zone: Zone the instance is in. Default zone will be used if omitted.
+
+    Returns:
+      An Instance Resource.
+
+    Raises:
+      ResourceNotFoundError if instance was not found, or HttpError on other
+      HTTP failures.
+    """
+    try:
+      return self.gce_client.instances().get(project=self.project,
+                                             zone=zone or self.zone,
+                                             instance=instance).execute()
+    except HttpError as e:
+      if e.resp.status == 404:
+        raise ResourceNotFoundError(
+            'Instance "%s" for project "%s" in zone "%s" was not found.' %
+            (instance, self.project, zone or self.zone))
+      else:
+        raise
+
+  def GetInstanceIP(self, instance, zone=None):
+    """Gets the external IP of an instance.
+
+    Args:
+      instance: Name of the instance to get IP for.
+      zone: Zone the instance is in. Default zone will be used if omitted.
+
+    Returns:
+      External IP address of the instance.
+
+    Raises:
+      Error: Something went wrong when trying to get IP for the instance.
+    """
+    result = self.GetInstance(instance, zone)
+    try:
+      return result['networkInterfaces'][0]['accessConfigs'][0]['natIP']
+    except (KeyError, IndexError):
+      raise Error('Failed to get IP address for instance %s' % instance)
+
+  def GetInstanceInternalIP(self, instance, zone=None):
+    """Gets the internal IP of an instance."""
+    result = self.GetInstance(instance, zone)
+    try:
+      return result['networkInterfaces'][0]['networkIP']
+    except (KeyError, IndexError):
+      raise Error('Failed to get internal IP for instance %s' % instance)
+
+  def GetImage(self, image):
+    """Gets an Image Resource by name.
+
+    Args:
+      image: Name of the image to look for.
+
+    Returns:
+      An Image Resource.
+
+    Raises:
+      ResourceNotFoundError: The requested image was not found.
+    """
+    try:
+      return self.gce_client.images().get(project=self.project,
+                                          image=image).execute()
+    except HttpError as e:
+      if e.resp.status == 404:
+        raise ResourceNotFoundError('Image "%s" for project "%s" was not found.'
+                                    % (image, self.project))
+      else:
+        raise
+
+  def InstanceExists(self, instance, zone=None):
+    """Checks if an instance exists in the current project.
+
+    Args:
+      instance: Name of the instance to check existence of.
+      zone: Zone the instance is in. Default zone will be used if omitted.
+
+    Returns:
+      True if the instance exists or False otherwise.
+    """
+    try:
+      return self.GetInstance(instance, zone) is not None
+    except ResourceNotFoundError:
+      return False
+
+  def ImageExists(self, image):
+    """Checks if an image exists in the current project.
+
+    Args:
+      image: Name of the image to check existence of.
+
+    Returns:
+      True if the image exists or False otherwise.
+    """
+    try:
+      return self.GetImage(image) is not None
+    except ResourceNotFoundError:
+      return False
+
+  def GetCommonInstanceMetadata(self, key):
+    """Looks up a single project metadata value.
+
+    Args:
+      key: Metadata key name.
+
+    Returns:
+      Metadata value corresponding to the key, or None if it was not found.
+    """
+    projects_data = self.gce_client.projects().get(
+        project=self.project).execute()
+    metadata = projects_data['commonInstanceMetadata']
+    return _GetMetdataValue(metadata, key)
+
+  def SetCommonInstanceMetadata(self, key, value):
+    """Sets a single project metadata value.
+
+    Args:
+      key: Metadata key to be set.
+      value: New value, or None if the given key should be removed.
+    """
+    projects_data = self.gce_client.projects().get(
+        project=self.project).execute()
+    metadata = projects_data['commonInstanceMetadata']
+    _UpdateMetadataValue(metadata, key, value)
+    operation = self.gce_client.projects().setCommonInstanceMetadata(
+        project=self.project,
+        body=metadata).execute()
+    self._WaitForGlobalOperation(operation['name'])
+
+  def GetInstanceMetadata(self, instance, key):
+    """Looks up instance's metadata value.
+
+    Args:
+      instance: Name of the instance.
+      key: Metadata key name.
+
+    Returns:
+      Metadata value corresponding to the key, or None if it was not found.
+    """
+    instance_data = self.GetInstance(instance)
+    metadata = instance_data['metadata']
+    return _GetMetdataValue(metadata, key)
+
+  def SetInstanceMetadata(self, instance, key, value):
+    """Sets a single instance metadata value.
+
+    Args:
+      instance: Name of the instance.
+      key: Metadata key to be set.
+      value: New value, or None if the given key should be removed.
+    """
+    instance_data = self.GetInstance(instance)
+    metadata = instance_data['metadata']
+    _UpdateMetadataValue(metadata, key, value)
+    operation = self.gce_client.instances().setMetadata(
+        project=self.project,
+        zone=self.zone,
+        instance=instance,
+        body=metadata).execute()
+    self._WaitForZoneOperation(operation['name'])
+
+  def _WaitForZoneOperation(self, operation, zone=None, timeout_sec=None,
+                            timeout_handler=None):
+    """Waits until a GCE ZoneOperation is finished or timed out.
+
+    Args:
+      operation: The GCE operation to wait for.
+      zone: The zone that |operation| belongs to.
+      timeout_sec: The maximum number of seconds to wait for.
+      timeout_handler: A callable to be executed when timeout happens.
+
+    Raises:
+      Error when timeout happens or the operation fails.
+    """
+    get_request = self.gce_client.zoneOperations().get(
+        project=self.project, zone=zone or self.zone, operation=operation)
+    self._WaitForOperation(operation, get_request, timeout_sec,
+                           timeout_handler=timeout_handler)
+
+  def _WaitForRegionOperation(self, operation, region, timeout_sec=None,
+                              timeout_handler=None):
+    """Waits until a GCE RegionOperation is finished or timed out.
+
+    Args:
+      operation: The GCE operation to wait for.
+      region: The region that |operation| belongs to.
+      timeout_sec: The maximum number of seconds to wait for.
+      timeout_handler: A callable to be executed when timeout happens.
+
+    Raises:
+      Error when timeout happens or the operation fails.
+    """
+    get_request = self.gce_client.regionOperations().get(
+        project=self.project, region=region or self.region, operation=operation)
+    self._WaitForOperation(operation, get_request, timeout_sec,
+                           timeout_handler=timeout_handler)
+
+  def _WaitForGlobalOperation(self, operation, timeout_sec=None,
+                              timeout_handler=None):
+    """Waits until a GCE GlobalOperation is finished or timed out.
+
+    Args:
+      operation: The GCE operation to wait for.
+      timeout_sec: The maximum number of seconds to wait for.
+      timeout_handler: A callable to be executed when timeout happens.
+
+    Raises:
+      Error when timeout happens or the operation fails.
+    """
+    get_request = self.gce_client.globalOperations().get(project=self.project,
+                                                         operation=operation)
+    self._WaitForOperation(operation, get_request, timeout_sec=timeout_sec,
+                           timeout_handler=timeout_handler)
+
+  def _WaitForOperation(self, operation, get_operation_request,
+                        timeout_sec=None, timeout_handler=None):
+    """Waits until timeout or the request gets a response with a 'DONE' status.
+
+    Args:
+      operation: The GCE operation to wait for.
+      get_operation_request:
+        The HTTP request to get the operation's status.
+        This request will be executed periodically until it returns a status
+        'DONE'.
+      timeout_sec: The maximum number of seconds to wait for.
+      timeout_handler: A callable to be executed when the wait times out.
+
+    Raises:
+      Error when timeout happens or the operation fails.
+    """
+    def _IsDone():
+      result = get_operation_request.execute()
+      if result['status'] == 'DONE':
+        if 'error' in result:
+          raise Error(result['error'])
+        return True
+      return False
+
+    try:
+      timeout = timeout_sec or self.DEFAULT_TIMEOUT_SEC
+      logging.info('Waiting up to %d seconds for operation [%s] to complete...',
+                   timeout, operation)
+      timeout_util.WaitForReturnTrue(_IsDone, timeout, period=1)
+    except timeout_util.TimeoutError:
+      if timeout_handler:
+        timeout_handler()
+      raise Error('Timeout waiting for operation [%s] to complete' % operation)
+
+  def _BuildRetriableRequest(self, num_retries, http, thread_safe=False,
+                             credentials=None, *args, **kwargs):
+    """Builds a request that will be automatically retried on server errors.
+
+    Args:
+      num_retries: The maximum number of times to retry before giving up.
+      http: An httplib2.Http object that this request will be executed through.
+      thread_safe: Whether or not the request needs to be thread-safe.
+      credentials: Credentials to apply to the request.
+      *args: Optional positional arguments.
+      **kwargs: Optional keyword arguments.
+
+    Returns:
+      RetryOnServerErrorHttpRequest: A request that will be automatically retried
+          on server errors.
+    """
+    if thread_safe:
+      # Create a new http object for every request.
+      http = credentials.authorize(httplib2.Http())
+    return RetryOnServerErrorHttpRequest(num_retries, http, *args, **kwargs)
diff --git a/utils/frozen_chromite/lib/git.py b/utils/frozen_chromite/lib/git.py
new file mode 100644
index 0000000..9e578ec
--- /dev/null
+++ b/utils/frozen_chromite/lib/git.py
@@ -0,0 +1,1567 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common functions for interacting with git and repo."""
+
+from __future__ import print_function
+
+import collections
+import datetime
+import errno
+import fnmatch
+import hashlib
+import os
+import re
+import string
+import subprocess
+from xml import sax
+
+import six
+
+from autotest_lib.utils.frozen_chromite.lib import config_lib
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import osutils
+
+
+class GitException(Exception):
+  """An exception related to git."""
+
+
+# remote: git remote name (e.g., 'origin',
+#   'https://chromium.googlesource.com/chromiumos/chromite.git', etc.).
+# ref: git remote/local ref name (e.g., 'refs/heads/master').
+# project_name: git project name (e.g., 'chromiumos/chromite').
+_RemoteRef = collections.namedtuple(
+    '_RemoteRef', ('remote', 'ref', 'project_name'))
+
+
+class RemoteRef(_RemoteRef):
+  """Object representing a remote ref."""
+
+  def __new__(cls, remote, ref, project_name=None):
+    return super(RemoteRef, cls).__new__(cls, remote, ref, project_name)
+
+
+def FindRepoDir(path):
+  """Returns the nearest higher-level repo dir from the specified path.
+
+  Args:
+    path: The path to search upwards from.
+  """
+  return osutils.FindInPathParents(
+      '.repo', path, test_func=os.path.isdir)
+
+
+def FindRepoCheckoutRoot(path):
+  """Get the root of your repo managed checkout."""
+  repo_dir = FindRepoDir(path)
+  if repo_dir:
+    return os.path.dirname(repo_dir)
+  else:
+    return None
+
+
+def IsSubmoduleCheckoutRoot(path, remote, url):
+  """Tests to see if a directory is the root of a git submodule checkout.
+
+  Args:
+    path: The directory to test.
+    remote: The remote to compare the |url| with.
+    url: The exact URL the |remote| needs to be pointed at.
+  """
+  if os.path.isdir(path):
+    remote_url = cros_build_lib.run(
+        ['git', '--git-dir', path, 'config', 'remote.%s.url' % remote],
+        stdout=True, debug_level=logging.DEBUG,
+        check=False, encoding='utf-8').output.strip()
+    if remote_url == url:
+      return True
+  return False
+
+
+def GetGitGitdir(pwd):
+  """Probes for a git gitdir directory rooted at a directory.
+
+  Args:
+    pwd: Directory to probe. If a checkout, should be the root.
+
+  Returns:
+    Path of the gitdir directory. None if the directory is not a git repo.
+  """
+  if os.path.isdir(os.path.join(pwd, '.git')):
+    return os.path.join(pwd, '.git')
+  # Is this directory a bare repo with no checkout?
+  if os.path.isdir(os.path.join(
+      pwd, 'objects')) and os.path.isdir(os.path.join(pwd, 'refs')):
+    return pwd
+  return None
+
+
+def IsGitRepositoryCorrupted(cwd):
+  """Verify that the specified git repository is not corrupted.
+
+  Args:
+    cwd: The git repository to verify.
+
+  Returns:
+    True if the repository is corrupted.
+  """
+  cmd = ['fsck', '--no-progress', '--no-dangling']
+  try:
+    GarbageCollection(cwd)
+    RunGit(cwd, cmd)
+    return False
+  except cros_build_lib.RunCommandError as ex:
+    logging.warning(str(ex))
+    return True
+
+
+_HEX_CHARS = frozenset(string.hexdigits)
+
+
+def IsSHA1(value, full=True):
+  """Returns True if the given value looks like a sha1.
+
+  If full is True, then it must be the full length of 40 chars.  If False, the
+  length must be between 6 and 40 chars.
+  """
+  if not all(x in _HEX_CHARS for x in value):
+    return False
+  l = len(value)
+  if full:
+    return l == 40
+  return l >= 6 and l <= 40
+
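+# An illustrative behavior sketch (not part of the original chromite code):
+#
+#   IsSHA1('d3adbeef')              # False: hex, but not the full 40 chars
+#   IsSHA1('d3adbeef', full=False)  # True: hex and between 6 and 40 chars
+#   IsSHA1('refs/heads/master')     # False: contains non-hex characters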
+
+def IsRefsTags(value):
+  """Return True if the given value looks like a tag.
+
+  Currently this is identified via refs/tags/ prefixing.
+  """
+  return value.startswith('refs/tags/')
+
+
+def GetGitRepoRevision(cwd, branch='HEAD', short=False):
+  """Find the revision of a branch.
+
+  Args:
+    cwd: The git repository to work with.
+    branch: Branch name. Defaults to current branch.
+    short: If set, output shorter unique SHA-1.
+
+  Returns:
+    Revision SHA-1.
+  """
+  cmd = ['rev-parse', branch]
+  if short:
+    cmd.insert(1, '--short')
+  return RunGit(cwd, cmd).output.strip()
+
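+# A hedged usage sketch (not part of the original chromite code); the checkout
+# path below is hypothetical:
+#
+#   GetGitRepoRevision('/path/to/checkout')              # full SHA-1 of HEAD
+#   GetGitRepoRevision('/path/to/checkout', short=True)  # abbreviated SHA-1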
+
+def IsReachable(cwd, to_ref, from_ref):
+  """Determine whether one commit ref is reachable from another.
+
+  Args:
+    cwd: The git repository to work with.
+    to_ref: The commit ref that may be reachable.
+    from_ref: The commit ref that |to_ref| may be reachable from.
+
+  Returns:
+    True if |to_ref| is reachable from |from_ref|.
+
+  Raises:
+    RunCommandError: if some error occurs, such as a commit ref not existing.
+  """
+  try:
+    RunGit(cwd, ['merge-base', '--is-ancestor', to_ref, from_ref])
+  except cros_build_lib.RunCommandError as e:
+    if e.result.returncode == 1:
+      return False
+    raise
+  return True
+
+
+def DoesCommitExistInRepo(cwd, commit):
+  """Determine whether a commit (SHA1 or ref) exists in a repo.
+
+  Args:
+    cwd: A directory within the project repo.
+    commit: The commit to look for. This can be a SHA1 or it can be a ref.
+
+  Returns:
+    True if the commit exists in the repo.
+  """
+  try:
+    RunGit(cwd, ['rev-list', '-n1', commit, '--'])
+  except cros_build_lib.RunCommandError as e:
+    if e.result.returncode == 128:
+      return False
+    raise
+  return True
+
+
+def GetCurrentBranch(cwd):
+  """Returns current branch of a repo, and None if repo is on detached HEAD."""
+  try:
+    ret = RunGit(cwd, ['symbolic-ref', '-q', 'HEAD'])
+    return StripRefsHeads(ret.output.strip(), False)
+  except cros_build_lib.RunCommandError as e:
+    if e.result.returncode != 1:
+      raise
+    return None
+
+
+def StripRefsHeads(ref, strict=True):
+  """Remove leading 'refs/heads/' from a ref name.
+
+  If strict is True, an Exception is thrown if the ref doesn't start with
+  refs/heads.  If strict is False, the original ref is returned.
+  """
+  if not ref.startswith('refs/heads/') and strict:
+    raise Exception('Ref name %s does not start with refs/heads/' % ref)
+
+  return ref.replace('refs/heads/', '')
+
+
+def StripRefs(ref):
+  """Remove leading 'refs/heads', 'refs/remotes/[^/]+/' from a ref name."""
+  ref = StripRefsHeads(ref, False)
+  if ref.startswith('refs/remotes/'):
+    return ref.split('/', 3)[-1]
+  return ref
+
+
+def NormalizeRef(ref):
+  """Convert git branch refs into fully qualified form."""
+  if ref and not ref.startswith('refs/'):
+    ref = 'refs/heads/%s' % ref
+  return ref
+
+
+def NormalizeRemoteRef(remote, ref):
+  """Convert git branch refs into fully qualified remote form."""
+  if ref:
+    # Support changing local ref to remote ref, or changing the remote
+    # for a remote ref.
+    ref = StripRefs(ref)
+
+    if not ref.startswith('refs/'):
+      ref = 'refs/remotes/%s/%s' % (remote, ref)
+
+  return ref
+
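+# An illustrative behavior sketch (not part of the original chromite code):
+#
+#   StripRefs('refs/heads/master')           # -> 'master'
+#   StripRefs('refs/remotes/origin/master')  # -> 'master'
+#   NormalizeRef('master')                   # -> 'refs/heads/master'
+#   NormalizeRemoteRef('cros', 'master')     # -> 'refs/remotes/cros/master'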
+
+class ProjectCheckout(dict):
+  """Attributes of a given project in the manifest checkout.
+
+  TODO(davidjames): Convert this into an ordinary object instead of a dict.
+  """
+
+  def __init__(self, attrs):
+    """Constructor.
+
+    Args:
+      attrs: The attributes associated with this checkout, as a dictionary.
+    """
+    dict.__init__(self, attrs)
+
+  def AssertPushable(self):
+    """Verify that it is safe to push changes to this repository."""
+    if not self['pushable']:
+      remote = self['remote']
+      raise AssertionError('Remote %s is not pushable.' % (remote,))
+
+  def IsBranchableProject(self):
+    """Return whether we can create a branch in the repo for this project."""
+    # Backwards compatibility is an issue here. Older manifests used a heuristic
+    # based on where the project is hosted. We must continue supporting it.
+    # (crbug.com/470690)
+    # Prefer explicit tagging.
+    if (self[constants.MANIFEST_ATTR_BRANCHING] ==
+        constants.MANIFEST_ATTR_BRANCHING_CREATE):
+      return True
+    if self[constants.MANIFEST_ATTR_BRANCHING] in (
+        constants.MANIFEST_ATTR_BRANCHING_PIN,
+        constants.MANIFEST_ATTR_BRANCHING_TOT):
+      return False
+
+    # Old heuristic.
+    site_params = config_lib.GetSiteParams()
+    if (self['remote'] not in site_params.CROS_REMOTES or
+        self['remote'] not in site_params.BRANCHABLE_PROJECTS):
+      return False
+    return re.match(site_params.BRANCHABLE_PROJECTS[self['remote']],
+                    self['name'])
+
+  def IsPinnableProject(self):
+    """Return whether we should pin to a revision on the CrOS branch."""
+    # Backwards compatibility is an issue here. Older manifests used a different
+    # tag to specify pinning behaviour. Support both for now. (crbug.com/470690)
+    # Prefer explicit tagging.
+    if self[constants.MANIFEST_ATTR_BRANCHING] != '':
+      return (self[constants.MANIFEST_ATTR_BRANCHING] ==
+              constants.MANIFEST_ATTR_BRANCHING_PIN)
+
+    # Old heuristic.
+    return cros_build_lib.BooleanShellValue(self.get('pin'), True)
+
+  def GetPath(self, absolute=False):
+    """Get the path to the checkout.
+
+    Args:
+      absolute: If True, return an absolute path. If False,
+        return a path relative to the repo root.
+    """
+    return self['local_path'] if absolute else self['path']
+
+
+class Manifest(object):
+  """SAX handler that parses the manifest document.
+
+  Attributes:
+    checkouts_by_name: A dictionary mapping the names for <project> tags to a
+      list of ProjectCheckout objects.
+    checkouts_by_path: A dictionary mapping paths for <project> tags to a single
+      ProjectCheckout object.
+    default: The attributes of the <default> tag.
+    includes: A list of XML files that should be pulled in to the manifest.
+      These includes are represented as a list of (name, path) tuples.
+    manifest_include_dir: If given, this is where to start looking for
+      include targets.
+    projects: DEPRECATED. A dictionary mapping the names for <project> tags to
+      a single ProjectCheckout object. This is now deprecated, since each
+      project can map to multiple ProjectCheckout objects.
+    remotes: A dictionary mapping <remote> tags to the associated attributes.
+    revision: The revision of the manifest repository. If not specified, this
+      will be TOT.
+  """
+
+  _instance_cache = {}
+
+  def __init__(self, source, manifest_include_dir=None):
+    """Initialize this instance.
+
+    Args:
+      source: The path to the manifest to parse.  May be a file handle.
+      manifest_include_dir: If given, this is where to start looking for
+        include targets.
+    """
+    self.source = source
+    self.default = {}
+    self._current_project_path = None
+    self._current_project_name = None
+    self._annotations = {}
+    self.checkouts_by_path = {}
+    self.checkouts_by_name = {}
+    self.remotes = {}
+    self.includes = []
+    self.revision = None
+    self.manifest_include_dir = manifest_include_dir
+    self._RunParser(source)
+    self.includes = tuple(self.includes)
+
+  def _RequireAttr(self, attr, attrs):
+    name = attrs.get('name')
+    assert attr in attrs, ('%s is missing a "%s" attribute; attrs: %r' %
+                           (name, attr, attrs))
+
+  def _RunParser(self, source, finalize=True):
+    parser = sax.make_parser()
+    handler = sax.handler.ContentHandler()
+    handler.startElement = self._StartElement
+    handler.endElement = self._EndElement
+    parser.setContentHandler(handler)
+
+    # Python 2 seems to expect either a file name (as a string) or an
+    # opened file as the parameter to parser.parse, whereas Python 3
+    # seems to expect a URL (as a string) or opened file. Make it
+    # compatible with both by opening files first.
+    with cros_build_lib.Open(source) as f:
+      parser.parse(f)
+
+    if finalize:
+      self._FinalizeAllProjectData()
+
+  def _StartElement(self, name, attrs):
+    """Stores the default manifest properties and per-project overrides."""
+    attrs = dict(attrs.items())
+    if name == 'default':
+      self.default = attrs
+    elif name == 'remote':
+      self._RequireAttr('name', attrs)
+      attrs.setdefault('alias', attrs['name'])
+      self.remotes[attrs['name']] = attrs
+    elif name == 'project':
+      self._RequireAttr('name', attrs)
+      self._current_project_path = attrs.get('path', attrs['name'])
+      self._current_project_name = attrs['name']
+      self.checkouts_by_path[self._current_project_path] = attrs
+      checkout = self.checkouts_by_name.setdefault(self._current_project_name,
+                                                   [])
+      checkout.append(attrs)
+      self._annotations = {}
+    elif name == 'annotation':
+      self._RequireAttr('name', attrs)
+      self._RequireAttr('value', attrs)
+      self._annotations[attrs['name']] = attrs['value']
+    elif name == 'manifest':
+      self.revision = attrs.get('revision')
+    elif name == 'include':
+      if self.manifest_include_dir is None:
+        raise OSError(
+            errno.ENOENT, 'No manifest_include_dir given, but an include was '
+            'encountered; attrs=%r' % (attrs,))
+      # Include is calculated relative to the manifest that has the include;
+      # thus set the path temporarily to the dirname of the target.
+      original_include_dir = self.manifest_include_dir
+      include_path = os.path.realpath(
+          os.path.join(original_include_dir, attrs['name']))
+      self.includes.append((attrs['name'], include_path))
+      self._RunParser(include_path, finalize=False)
+
+  def _EndElement(self, name):
+    """Store any child element properties into the parent element."""
+    if name == 'project':
+      assert (self._current_project_name is not None and
+              self._current_project_path is not None), (
+                  'Malformed xml: Encountered unmatched </project>')
+      self.checkouts_by_path[self._current_project_path].update(
+          self._annotations)
+      for checkout in self.checkouts_by_name[self._current_project_name]:
+        checkout.update(self._annotations)
+      self._current_project_path = None
+      self._current_project_name = None
+
+  def _FinalizeAllProjectData(self):
+    """Rewrite projects mixing defaults in and adding our attributes."""
+    for path_data in self.checkouts_by_path.values():
+      self._FinalizeProjectData(path_data)
+
+  def _FinalizeProjectData(self, attrs):
+    """Sets up useful properties for a project.
+
+    Args:
+      attrs: The attribute dictionary of a <project> tag.
+    """
+    for key in ('remote', 'revision'):
+      attrs.setdefault(key, self.default.get(key))
+
+    remote = attrs['remote']
+    assert remote in self.remotes, ('%s: %s not in %s' %
+                                    (self.source, remote, self.remotes))
+    remote_name = attrs['remote_alias'] = self.remotes[remote]['alias']
+
+    # 'repo manifest -r' adds an 'upstream' attribute to the project tag for the
+    # manifests it generates.  We can use the attribute to get a valid branch
+    # instead of a sha1 for these types of manifests.
+    upstream = attrs.get('upstream', attrs['revision'])
+    if IsSHA1(upstream):
+      # The current version of repo we use has a bug: When you create a new
+      # repo checkout from a revlocked manifest, the 'upstream' attribute will
+      # just point at a SHA1. The default revision will still be correct,
+      # however. For now, return the default revision as our best guess as to
+      # what the upstream branch for this repository would be. This guess may
+      # sometimes be wrong, but it's correct for all of the repositories where
+      # we need to push changes (e.g., the overlays).
+      # TODO(davidjames): Either fix the repo bug, or update our logic here to
+      # check the manifest repository to find the right tracking branch.
+      upstream = self.default.get('revision', 'refs/heads/master')
+
+    attrs['tracking_branch'] = 'refs/remotes/%s/%s' % (
+        remote_name, StripRefs(upstream),
+    )
+
+    site_params = config_lib.GetSiteParams()
+    attrs['pushable'] = remote in site_params.GIT_REMOTES
+    if attrs['pushable']:
+      attrs['push_remote'] = remote
+      attrs['push_remote_url'] = site_params.GIT_REMOTES[remote]
+      attrs['push_url'] = '%s/%s' % (attrs['push_remote_url'], attrs['name'])
+    groups = set(attrs.get('groups', 'default').replace(',', ' ').split())
+    groups.add('default')
+    attrs['groups'] = frozenset(groups)
+
+    # Compute the local ref space.
+    # Sanitize a couple path fragments to simplify assumptions in this
+    # class, and in consuming code.
+    attrs.setdefault('path', attrs['name'])
+    for key in ('name', 'path'):
+      attrs[key] = os.path.normpath(attrs[key])
+
+    if constants.MANIFEST_ATTR_BRANCHING in attrs:
+      assert (attrs[constants.MANIFEST_ATTR_BRANCHING] in
+              constants.MANIFEST_ATTR_BRANCHING_ALL)
+    else:
+      attrs[constants.MANIFEST_ATTR_BRANCHING] = ''
+
+  @staticmethod
+  def _GetManifestHash(source, ignore_missing=False):
+    if isinstance(source, six.string_types):
+      try:
+        # TODO(build): convert this to osutils.ReadFile once these
+        # classes are moved out into their own module (if possible;
+        # may still be cyclic).
+        with open(source, 'rb') as f:
+          return hashlib.md5(f.read()).hexdigest()
+      except EnvironmentError as e:
+        if e.errno != errno.ENOENT or not ignore_missing:
+          raise
+    source.seek(0)
+    md5 = hashlib.md5(source.read()).hexdigest()
+    source.seek(0)
+    return md5
+
+  @classmethod
+  def Cached(cls, source, manifest_include_dir=None):
+    """Return an instance, reusing an existing one if possible.
+
+    May be a seekable filehandle, or a filepath.
+    See __init__ for an explanation of these arguments.
+    """
+
+    md5 = cls._GetManifestHash(source)
+    obj, sources = cls._instance_cache.get(md5, (None, ()))
+    if manifest_include_dir is None and sources:
+      # We're being invoked in a different way than the original
+      # caching; disregard the cached entry.
+      # Most likely, the instantiation will explode; let it fly.
+      obj, sources = None, ()
+    for include_target, target_md5 in sources:
+      if cls._GetManifestHash(include_target, True) != target_md5:
+        obj = None
+        break
+    if obj is None:
+      obj = cls(source, manifest_include_dir=manifest_include_dir)
+      sources = tuple((abspath, cls._GetManifestHash(abspath))
+                      for (target, abspath) in obj.includes)
+      cls._instance_cache[md5] = (obj, sources)
+
+    return obj
+
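+# A hedged usage sketch (not part of the original chromite code); the manifest
+# path and project name below are hypothetical:
+#
+#   manifest = Manifest.Cached('/path/to/.repo/manifest.xml')
+#   for checkout in manifest.checkouts_by_name.get('chromiumos/chromite', []):
+#     print(checkout['revision'], checkout['tracking_branch'])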
+
+class ManifestCheckout(Manifest):
+  """A Manifest Handler for a specific manifest checkout."""
+
+  _instance_cache = {}
+
+  def __init__(self, path, manifest_path=None, search=True):
+    """Initialize this instance.
+
+    Args:
+      path: Path into a manifest checkout (doesn't have to be the root).
+      manifest_path: If supplied, the manifest to use.  Else the manifest
+        in the root of the checkout is used.  May be a seekable file handle.
+      search: If True, the path can point into the repo, and the root will
+        be found automatically.  If False, the path *must* be the root, else
+        an OSError ENOENT will be thrown.
+
+    Raises:
+      OSError: if a failure occurs.
+    """
+    self.root, manifest_path = self._NormalizeArgs(
+        path, manifest_path, search=search)
+
+    self.manifest_path = os.path.realpath(manifest_path)
+    # The include dir is always the manifest repo, not where the manifest file
+    # happens to live.
+    manifest_include_dir = os.path.join(self.root, '.repo', 'manifests')
+    self.manifest_branch = self._GetManifestsBranch(self.root)
+    self._content_merging = {}
+    Manifest.__init__(self, self.manifest_path,
+                      manifest_include_dir=manifest_include_dir)
+
+  @staticmethod
+  def _NormalizeArgs(path, manifest_path=None, search=True):
+    root = FindRepoCheckoutRoot(path)
+    if root is None:
+      raise OSError(errno.ENOENT, "Couldn't find repo root: %s" % (path,))
+    root = os.path.normpath(os.path.realpath(root))
+    if not search:
+      if os.path.normpath(os.path.realpath(path)) != root:
+        raise OSError(errno.ENOENT, 'Path %s is not a repo root, and search '
+                      'is disabled.' % path)
+    if manifest_path is None:
+      manifest_path = os.path.join(root, '.repo', 'manifest.xml')
+    return root, manifest_path
+
+  @staticmethod
+  def IsFullManifest(checkout_root):
+    """Returns True iff the given checkout is using a full manifest.
+
+    This method should go away as part of the cleanup related to brbug.com/854.
+
+    Args:
+      checkout_root: path to the root of an SDK checkout.
+
+    Returns:
+      True iff the manifest selected for the given SDK is a full manifest.
+      In this context we'll accept any manifest for which there are no groups
+      defined.
+    """
+    manifests_git_repo = os.path.join(checkout_root, '.repo', 'manifests.git')
+    cmd = ['config', '--local', '--get', 'manifest.groups']
+    result = RunGit(manifests_git_repo, cmd, check=False)
+
+    if result.output.strip():
+      # Full layouts don't define groups.
+      return False
+
+    return True
+
+  def FindCheckouts(self, project, branch=None):
+    """Returns the list of checkouts for a given |project|/|branch|.
+
+    Args:
+      project: Project name to search for.
+      branch: Branch to use.
+
+    Returns:
+      A list of ProjectCheckout objects.
+    """
+    checkouts = []
+    for checkout in self.checkouts_by_name.get(project, []):
+      tracking_branch = checkout['tracking_branch']
+      if branch is None or StripRefs(branch) == StripRefs(tracking_branch):
+        checkouts.append(checkout)
+    return checkouts
+
+  def FindCheckout(self, project, branch=None, strict=True):
+    """Returns the checkout associated with a given project/branch.
+
+    Args:
+      project: The project to look for.
+      branch: The branch that the project is tracking.
+      strict: Raise AssertionError if a checkout cannot be found.
+
+    Returns:
+      A ProjectCheckout object.
+
+    Raises:
+      AssertionError: if there is more than one checkout associated with the
+      given project/branch combination, or if strict is True and no checkout
+      is found.
+    """
+    checkouts = self.FindCheckouts(project, branch)
+    if len(checkouts) < 1:
+      if strict:
+        raise AssertionError('Could not find checkout of %s' % (project,))
+      return None
+    elif len(checkouts) > 1:
+      raise AssertionError('Too many checkouts found for %s' % project)
+    return checkouts[0]
+
+  def ListCheckouts(self):
+    """List the checkouts in the manifest.
+
+    Returns:
+      A list of ProjectCheckout objects.
+    """
+    return list(self.checkouts_by_path.values())
+
+  def FindCheckoutFromPath(self, path, strict=True):
+    """Find the associated checkouts for a given |path|.
+
+    The |path| can either be to the root of a project, or within the
+    project itself (chromite.cbuildbot for example).  It may be relative
+    to the repo root, or an absolute path.  If |path| is not within a
+    checkout, return None.
+
+    Args:
+      path: Path to examine.
+      strict: If True, fail when no checkout is found.
+
+    Returns:
+      None if no checkout is found, else the checkout.
+    """
+    # Realpath everything sans the target to keep people happy about
+    # how symlinks are handled; exempt the final node since following
+    # through that is unlikely even remotely desired.
+    tmp = os.path.join(self.root, os.path.dirname(path))
+    path = os.path.join(os.path.realpath(tmp), os.path.basename(path))
+    path = os.path.normpath(path) + '/'
+    candidates = []
+    for checkout in self.ListCheckouts():
+      if path.startswith(checkout['local_path'] + '/'):
+        candidates.append((checkout['path'], checkout))
+
+    if not candidates:
+      if strict:
+        raise AssertionError('Could not find repo project at %s' % (path,))
+      return None
+
+    # The checkout with the greatest common path prefix is the owner of
+    # the given pathway. Return that.
+    return max(candidates)[1]
+
+  def _FinalizeAllProjectData(self):
+    """Rewrite projects mixing defaults in and adding our attributes."""
+    Manifest._FinalizeAllProjectData(self)
+    for key, value in self.checkouts_by_path.items():
+      self.checkouts_by_path[key] = ProjectCheckout(value)
+    for key, value in self.checkouts_by_name.items():
+      self.checkouts_by_name[key] = \
+          [ProjectCheckout(x) for x in value]
+
+  def _FinalizeProjectData(self, attrs):
+    Manifest._FinalizeProjectData(self, attrs)
+    attrs['local_path'] = os.path.join(self.root, attrs['path'])
+
+  @staticmethod
+  def _GetManifestsBranch(root):
+    """Get the tracking branch of the manifest repository.
+
+    Returns:
+      The branch name.
+    """
+    # Suppress the normal "if it ain't refs/heads, we don't want none o' that"
+    # check for the merge target; repo writes the ambiguous form of the branch
+    # target for `repo init -u url -b some-branch` usages (aka, 'master'
+    # instead of 'refs/heads/master').
+    path = os.path.join(root, '.repo', 'manifests')
+    current_branch = GetCurrentBranch(path)
+    if current_branch != 'default':
+      raise OSError(errno.ENOENT,
+                    'Manifest repository at %s is checked out to %s.  '
+                    "It should be checked out to 'default'."
+                    % (root, 'detached HEAD' if current_branch is None
+                       else current_branch))
+
+    result = GetTrackingBranchViaGitConfig(
+        path, 'default', allow_broken_merge_settings=True, for_checkout=False)
+
+    if result is not None:
+      return StripRefsHeads(result.ref, False)
+
+    raise OSError(errno.ENOENT,
+                  "Manifest repository at %s is checked out to 'default', but "
+                  'the git tracking configuration for that branch is broken; '
+                  'failing due to that.' % (root,))
+
+  # pylint: disable=arguments-differ
+  @classmethod
+  def Cached(cls, path, manifest_path=None, search=True):
+    """Return an instance, reusing an existing one if possible.
+
+    Args:
+      path: The pathway into a checkout; the root will be found automatically.
+      manifest_path: if given, the manifest.xml to use instead of the
+        checkout's internal manifest.  Use with care.
+      search: If True, the path can point into the repo, and the root will
+        be found automatically.  If False, the path *must* be the root, else
+        an OSError ENOENT will be thrown.
+    """
+    root, manifest_path = cls._NormalizeArgs(path, manifest_path,
+                                             search=search)
+
+    md5 = cls._GetManifestHash(manifest_path)
+    obj, sources = cls._instance_cache.get((root, md5), (None, ()))
+    for include_target, target_md5 in sources:
+      if cls._GetManifestHash(include_target, True) != target_md5:
+        obj = None
+        break
+    if obj is None:
+      obj = cls(root, manifest_path=manifest_path)
+      sources = tuple((abspath, cls._GetManifestHash(abspath))
+                      for (target, abspath) in obj.includes)
+      cls._instance_cache[(root, md5)] = (obj, sources)
+    return obj
+
+
+def RunGit(git_repo, cmd, **kwargs):
+  """Wrapper for git commands.
+
+  This suppresses print_cmd, and suppresses output by default.  Git
+  functionality w/in this module should use this unless otherwise
+  warranted, to standardize git output (primarily, keeping it quiet
+  and being able to throw useful errors for it).
+
+  Args:
+    git_repo: Pathway to the git repo to operate on.
+    cmd: A sequence of the git subcommand to run.  The 'git' prefix is
+      added automatically.  If you wished to run 'git remote update',
+      this would be ['remote', 'update'] for example.
+    kwargs: Any run or GenericRetry options/overrides to use.
+
+  Returns:
+    A CommandResult object.
+  """
+  kwargs.setdefault('print_cmd', False)
+  kwargs.setdefault('cwd', git_repo)
+  kwargs.setdefault('capture_output', True)
+  kwargs.setdefault('encoding', 'utf-8')
+  return cros_build_lib.run(['git'] + cmd, **kwargs)
+
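+# Illustrative usage sketch (the repo path below is hypothetical): RunGit
+# returns the cros_build_lib.run() result, so callers typically read .output.
+#
+#   result = RunGit('/path/to/repo', ['rev-parse', 'HEAD'])
+#   head_sha1 = result.output.strip()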
+
+def Init(git_repo):
+  """Create a new git repository, in the given location.
+
+  Args:
+    git_repo: Path for where to create a git repo. Directory will be created if
+              it doesn't exist.
+  """
+  osutils.SafeMakedirs(git_repo)
+  RunGit(git_repo, ['init'])
+
+
+def Clone(dest_path, git_url, reference=None, depth=None, branch=None,
+          single_branch=False):
+  """Clone a git repository, into the given directory.
+
+  Args:
+    dest_path: Path to clone into. Will be created if it doesn't exist.
+    git_url: Git URL to clone from.
+    reference: Path to a git repository to reference in the clone. See
+      documentation for `git clone --reference`.
+    depth: Create a shallow clone with the given history depth. Cannot be used
+      with 'reference'.
+    branch: Branch to use for the initial HEAD. Defaults to the remote's HEAD.
+    single_branch: Clone only the requested branch.
+  """
+  if reference and depth:
+    raise ValueError('reference and depth are mutually exclusive')
+  osutils.SafeMakedirs(dest_path)
+  cmd = ['clone', git_url, dest_path]
+  if reference:
+    cmd += ['--reference', reference]
+  if depth:
+    cmd += ['--depth', str(int(depth))]
+  if branch:
+    cmd += ['--branch', branch]
+  if single_branch:
+    cmd += ['--single-branch']
+  RunGit(dest_path, cmd, print_cmd=True)
+
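+# Illustrative usage sketch (URL and path are hypothetical): a shallow,
+# single-branch clone of one branch.
+#
+#   Clone('/tmp/src', 'https://chromium.googlesource.com/example/repo',
+#         depth=1, branch='master', single_branch=True)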
+
+def ShallowFetch(git_repo, git_url, sparse_checkout=None):
+  """Fetch a shallow git repository.
+
+  Args:
+    git_repo: Path of the git repo.
+    git_url: Url to fetch the git repository from.
+    sparse_checkout: List of file paths to fetch.
+  """
+  Init(git_repo)
+  RunGit(git_repo, ['remote', 'add', 'origin', git_url])
+  if sparse_checkout is not None:
+    assert isinstance(sparse_checkout, list)
+    RunGit(git_repo, ['config', 'core.sparsecheckout', 'true'])
+    osutils.WriteFile(os.path.join(git_repo, '.git/info/sparse-checkout'),
+                      '\n'.join(sparse_checkout))
+    logging.info('Sparse checkout: %s', sparse_checkout)
+
+  utcnow = datetime.datetime.utcnow
+  start = utcnow()
+  # Only fetch TOT git metadata without revision history.
+  RunGit(git_repo, ['fetch', '--depth=1'],
+         print_cmd=True, stderr=True, capture_output=False)
+  # Pull the files in sparse_checkout.
+  RunGit(git_repo, ['pull', 'origin', 'master'],
+         print_cmd=True, stderr=True, capture_output=False)
+  logging.info('ShallowFetch completed in %s.', utcnow() - start)
+
+
+def FindGitTopLevel(path):
+  """Returns the top-level directory of the given git working tree path."""
+  try:
+    ret = RunGit(path, ['rev-parse', '--show-toplevel'])
+    return ret.output.strip()
+  except cros_build_lib.RunCommandError:
+    return None
+
+
+def GetProjectUserEmail(git_repo):
+  """Get the email configured for the project."""
+  output = RunGit(git_repo, ['var', 'GIT_COMMITTER_IDENT']).output
+  m = re.search(r'<([^>]*)>', output.strip())
+  return m.group(1) if m else None
+
+
+def MatchBranchName(git_repo, pattern, namespace=''):
+  """Return branches who match the specified regular expression.
+
+  Args:
+    git_repo: The git repository to operate upon.
+    pattern: The regexp to search with.
+    namespace: The namespace to restrict search to (e.g. 'refs/heads/').
+
+  Returns:
+    List of matching branch names (with |namespace| trimmed).
+  """
+  output = RunGit(git_repo, ['ls-remote', git_repo, namespace + '*']).output
+  branches = [x.split()[1] for x in output.splitlines()]
+  branches = [x[len(namespace):] for x in branches if x.startswith(namespace)]
+
+  # Try exact match first.
+  match = re.compile(r'(^|/)%s$' % (pattern,), flags=re.I)
+  ret = [x for x in branches if match.search(x)]
+  if ret:
+    return ret
+
+  # Fall back to regex match if no exact match.
+  match = re.compile(pattern, flags=re.I)
+  return [x for x in branches if match.search(x)]
+
+
+class AmbiguousBranchName(Exception):
+  """Error if given branch name matches too many branches."""
+
+
+def MatchSingleBranchName(*args, **kwargs):
+  """Match exactly one branch name, else throw an exception.
+
+  Args:
+    See MatchBranchName for more details; all args are passed on.
+
+  Returns:
+    The branch name.
+
+  Raises:
+    AmbiguousBranchName: if we did not match exactly one branch.
+  """
+  ret = MatchBranchName(*args, **kwargs)
+  if len(ret) != 1:
+    raise AmbiguousBranchName('Did not match exactly 1 branch: %r' % ret)
+  return ret[0]
+
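+# Illustrative usage sketch (repo path and branch names are hypothetical):
+#
+#   MatchBranchName('/path/to/repo', r'release-R\d+', namespace='refs/heads/')
+#   # -> e.g. ['release-R80', 'release-R81']
+#   MatchSingleBranchName('/path/to/repo', 'release-R81',
+#                         namespace='refs/heads/')
+#   # -> 'release-R81', or raises AmbiguousBranchName on multiple matches.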
+
+def GetTrackingBranchViaGitConfig(git_repo, branch, for_checkout=True,
+                                  allow_broken_merge_settings=False,
+                                  recurse=10):
+  """Pull the remote and upstream branch of a local branch
+
+  Args:
+    git_repo: The git repository to operate upon.
+    branch: The branch to inspect.
+    for_checkout: Whether to return localized refspecs, or the remote's
+      view of it.
+    allow_broken_merge_settings: Repo in a couple of spots writes invalid
+      branch.mybranch.merge settings; if these are encountered, they're
+      normally treated as an error and this function returns None.  If
+      this option is set to True, it suppresses this check.
+    recurse: If given and the target is local, then recurse through any
+      remote=. (aka locals).  This is enabled by default, and is what allows
+      developers to have multiple local branches of development dependent
+      on one another; disabling this makes that work flow impossible,
+      thus disable it only with good reason.  The value given controls how
+      deeply to recurse.  Defaults to tracing through 10 levels of local
+      remotes. Disabling it is a matter of passing 0.
+
+  Returns:
+    A RemoteRef, or None.  If for_checkout, then it returns the localized
+    version of it.
+  """
+  try:
+    cmd = ['config', '--get-regexp',
+           r'branch\.%s\.(remote|merge)' % re.escape(branch)]
+    data = RunGit(git_repo, cmd).output.splitlines()
+
+    prefix = 'branch.%s.' % (branch,)
+    data = [x.split() for x in data]
+    vals = dict((x[0][len(prefix):], x[1]) for x in data)
+    if len(vals) != 2:
+      if not allow_broken_merge_settings:
+        return None
+      elif 'merge' not in vals:
+        # There isn't anything we can do here.
+        return None
+      elif 'remote' not in vals:
+        # Repo v1.9.4 and up occasionally invalidly leave the remote out.
+        # Only occurs for the manifest repo fortunately.
+        vals['remote'] = 'origin'
+    remote, rev = vals['remote'], vals['merge']
+    # Suppress non-branches; repo likes to write revisions and tags here,
+    # which is wrong (git neither likes nor honors it).
+    if rev.startswith('refs/remotes/'):
+      if for_checkout:
+        return RemoteRef(remote, rev)
+      # We can't backtrack from here, or at least don't want to.
+      # This is likely refs/remotes/m/ which repo writes when dealing
+      # with a revision locked manifest.
+      return None
+    if not rev.startswith('refs/heads/'):
+      # We explicitly don't allow pushing to tags, nor can one push
+      # to a sha1 remotely (makes no sense).
+      if not allow_broken_merge_settings:
+        return None
+    elif remote == '.':
+      if recurse == 0:
+        raise Exception(
+            'While tracing out tracking branches, we recursed too deeply: '
+            'bailing at %s' % branch)
+      return GetTrackingBranchViaGitConfig(
+          git_repo, StripRefsHeads(rev), for_checkout=for_checkout,
+          allow_broken_merge_settings=allow_broken_merge_settings,
+          recurse=recurse - 1)
+    elif for_checkout:
+      rev = 'refs/remotes/%s/%s' % (remote, StripRefsHeads(rev))
+    return RemoteRef(remote, rev)
+  except cros_build_lib.RunCommandError as e:
+    # 1 is the retcode for no matches.
+    if e.result.returncode != 1:
+      raise
+  return None
+
+
+def GetTrackingBranchViaManifest(git_repo, for_checkout=True, for_push=False,
+                                 manifest=None):
+  """Gets the appropriate push branch via the manifest if possible.
+
+  Args:
+    git_repo: The git repo to operate upon.
+    for_checkout: Whether to return localized refspecs, or the remote's
+      view of it.  Note that depending on the remote, the returned remote may
+      differ based on whether for_push is True or False.
+    for_push: Controls whether the remote and refspec returned is explicitly
+      for pushing.
+    manifest: A Manifest instance if one is available, else a
+      ManifestCheckout is created and used.
+
+  Returns:
+    A RemoteRef, or None.  If for_checkout, then it returns the localized
+    version of it.
+  """
+  try:
+    if manifest is None:
+      manifest = ManifestCheckout.Cached(git_repo)
+
+    checkout = manifest.FindCheckoutFromPath(git_repo, strict=False)
+
+    if checkout is None:
+      return None
+
+    if for_push:
+      checkout.AssertPushable()
+
+    if for_push:
+      remote = checkout['push_remote']
+    else:
+      remote = checkout['remote']
+
+    if for_checkout:
+      revision = checkout['tracking_branch']
+    else:
+      revision = checkout['revision']
+      if not revision.startswith('refs/heads/'):
+        return None
+
+    project_name = checkout.get('name', None)
+
+    return RemoteRef(remote, revision, project_name=project_name)
+  except EnvironmentError as e:
+    if e.errno != errno.ENOENT:
+      raise
+  return None
+
+
+def GetTrackingBranch(git_repo, branch=None, for_checkout=True, fallback=True,
+                      manifest=None, for_push=False):
+  """Gets the appropriate push branch for the specified directory.
+
+  This function works on both repo projects and regular git checkouts.
+
+  Assumptions:
+   1. We assume the manifest defined upstream is desirable.
+   2. No manifest?  Assume the tracking branch, if configured, is accurate.
+   3. If none of the above apply, you get 'origin', 'master' or None,
+      depending on fallback.
+
+  Args:
+    git_repo: Git repository to operate upon.
+    branch: Find the tracking branch for this branch.  Defaults to the
+      current branch for |git_repo|.
+    for_checkout: Whether to return localized refspecs, or the remote's
+      view of it.  Note that depending on the remote, the returned remote may
+      differ based on whether for_push is True or False.
+    fallback: If True and no remote/branch could be discerned, return
+      'origin', 'master'.  If False, return None.
+    for_push: Controls whether the remote and refspec returned is explicitly
+      for pushing.
+    manifest: A Manifest instance if one is available, else a
+      ManifestCheckout is created and used.
+
+  Returns:
+    A RemoteRef, or None.
+  """
+  result = GetTrackingBranchViaManifest(git_repo, for_checkout=for_checkout,
+                                        manifest=manifest, for_push=for_push)
+  if result is not None:
+    return result
+
+  if branch is None:
+    branch = GetCurrentBranch(git_repo)
+  if branch:
+    result = GetTrackingBranchViaGitConfig(git_repo, branch,
+                                           for_checkout=for_checkout)
+    if result is not None:
+      if (result.ref.startswith('refs/heads/') or
+          result.ref.startswith('refs/remotes/')):
+        return result
+
+  if not fallback:
+    return None
+  if for_checkout:
+    return RemoteRef('origin', 'refs/remotes/origin/master')
+  return RemoteRef('origin', 'master')
+
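+# Illustrative usage sketch (the checkout path is hypothetical):
+#
+#   ref = GetTrackingBranch('/path/to/checkout', for_checkout=False,
+#                           for_push=True)
+#   # ref is a RemoteRef, e.g. RemoteRef('cros', 'refs/heads/master'); when
+#   # nothing can be discerned, the fallback is RemoteRef('origin', 'master').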
+
+def CreateBranch(git_repo, branch, branch_point='HEAD', track=False):
+  """Create a branch.
+
+  Args:
+    git_repo: Git repository to act on.
+    branch: Name of the branch to create.
+    branch_point: The ref to branch from.  Defaults to 'HEAD'.
+    track: Whether to setup the branch to track its starting ref.
+  """
+  cmd = ['checkout', '-B', branch, branch_point]
+  if track:
+    cmd.append('--track')
+  RunGit(git_repo, cmd)
+
+
+def AddPath(path):
+  """Use 'git add' on a path.
+
+  Args:
+    path: Path to the git repository and the path to add.
+  """
+  dirname, filename = os.path.split(path)
+  RunGit(dirname, ['add', '--', filename])
+
+
+def RmPath(path):
+  """Use 'git rm' on a file.
+
+  Args:
+    path: Path to the git repository and the path to rm.
+  """
+  dirname, filename = os.path.split(path)
+  RunGit(dirname, ['rm', '--', filename])
+
+
+def GetObjectAtRev(git_repo, obj, rev, binary=False):
+  """Return the contents of a git object at a particular revision.
+
+  This could be used to look at an old version of a file or directory, for
+  instance, without modifying the working directory.
+
+  Args:
+    git_repo: Path to a directory in the git repository to query.
+    obj: The name of the object to read.
+    rev: The revision to retrieve.
+    binary: If true, return bytes instead of decoding as a UTF-8 string.
+
+  Returns:
+    The content of the object.
+  """
+  rev_obj = '%s:%s' % (rev, obj)
+  encoding = None if binary else 'utf-8'
+  return RunGit(git_repo, ['show', rev_obj], encoding=encoding).output
+
+
+def RevertPath(git_repo, filename, rev):
+  """Revert a single file back to a particular revision and 'add' it with git.
+
+  Args:
+    git_repo: Path to the directory holding the file.
+    filename: Name of the file to revert.
+    rev: Revision to revert the file to.
+  """
+  RunGit(git_repo, ['checkout', rev, '--', filename])
+
+
+# In Log, we use "format" to refer to the --format flag to
+# git. Disable the nags from pylint.
+# pylint: disable=redefined-builtin
+def Log(git_repo, format=None, after=None, until=None,
+        reverse=False, date=None, max_count=None, grep=None,
+        rev='HEAD', paths=None):
+  """Return git log output for the given arguments.
+
+  For more detailed description of the parameters, run `git help log`.
+
+  Args:
+    git_repo: Path to a directory in the git repository.
+    format: Passed directly to the --format flag.
+    after: Passed directly to --after flag.
+    until: Passed directly to --until flag.
+    reverse: If true, set --reverse flag.
+    date: Passed directly to --date flag.
+    max_count: Passed directly to --max-count flag.
+    grep: Passed directly to --grep flag.
+    rev: Commit (or revision range) to log.
+    paths: List of paths to log commits for (enumerated after final -- ).
+
+  Returns:
+    The raw log output as a string.
+  """
+  cmd = ['log']
+  if format:
+    cmd.append('--format=%s' % format)
+  if after:
+    cmd.append('--after=%s' % after)
+  if until:
+    cmd.append('--until=%s' % until)
+  if reverse:
+    cmd.append('--reverse')
+  if date:
+    cmd.append('--date=%s' % date)
+  if max_count:
+    cmd.append('--max-count=%s' % max_count)
+  if grep:
+    cmd.append('--grep=%s' % grep)
+  cmd.append(rev)
+  if paths:
+    cmd.append('--')
+    cmd.extend(paths)
+  return RunGit(git_repo, cmd, errors='replace').stdout
+# pylint: enable=redefined-builtin
+
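+# Illustrative usage sketch (repo path and file path are hypothetical):
+#
+#   text = Log('/path/to/repo', format='format:%H %s', max_count=5,
+#              paths=['lib/git.py'])
+#   # text holds up to five '<sha1> <subject>' lines for commits touching
+#   # lib/git.py.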
+
+def GetChangeId(git_repo, rev='HEAD'):
+  """Retrieve the Change-Id from the commit message
+
+  Args:
+    git_repo: Path to the git repository where the commit is
+    rev: Commit to inspect, defaults to HEAD
+
+  Returns:
+    The Gerrit Change-Id assigned to the commit if it exists.
+  """
+  log = Log(git_repo, max_count=1, format='format:%B', rev=rev)
+  m = re.findall(r'^Change-Id: (I[a-fA-F0-9]{40})$', log, flags=re.M)
+  if not m:
+    return None
+  elif len(m) > 1:
+    raise ValueError('Too many Change-Ids found')
+  else:
+    return m[0]
+
+
+def Commit(git_repo, message, amend=False, allow_empty=False,
+           reset_author=False):
+  """Commit with git.
+
+  Args:
+    git_repo: Path to the git repository to commit in.
+    message: Commit message to use.
+    amend: Whether to 'amend' the CL, default False
+    allow_empty: Whether to allow an empty commit. Default False.
+    reset_author: Whether to reset author according to current config.
+
+  Returns:
+    The Gerrit Change-ID assigned to the CL if it exists.
+  """
+  cmd = ['commit', '-m', message]
+  if amend:
+    cmd.append('--amend')
+  if allow_empty:
+    cmd.append('--allow-empty')
+  if reset_author:
+    cmd.append('--reset-author')
+  RunGit(git_repo, cmd)
+  return GetChangeId(git_repo)
+
+
+_raw_diff_components = ('src_mode', 'dst_mode', 'src_sha', 'dst_sha',
+                        'status', 'score', 'src_file', 'dst_file')
+# RawDiffEntry represents a line of raw formatted git diff output.
+RawDiffEntry = collections.namedtuple('RawDiffEntry', _raw_diff_components)
+
+
+# This regular expression pulls apart a line of raw formatted git diff output.
+DIFF_RE = re.compile(
+    r':(?P<src_mode>[0-7]*) (?P<dst_mode>[0-7]*) '
+    r'(?P<src_sha>[0-9a-f]*)(\.)* (?P<dst_sha>[0-9a-f]*)(\.)* '
+    r'(?P<status>[ACDMRTUX])(?P<score>[0-9]+)?\t'
+    r'(?P<src_file>[^\t]+)\t?(?P<dst_file>[^\t]+)?')
+
+
+def RawDiff(path, target):
+  """Return the parsed raw format diff of target
+
+  Args:
+    path: Path to the git repository to diff in.
+    target: The target to diff.
+
+  Returns:
+    A list of RawDiffEntry's.
+  """
+  entries = []
+
+  cmd = ['diff', '-M', '--raw', target]
+  diff = RunGit(path, cmd).output
+  diff_lines = diff.strip().splitlines()
+  for line in diff_lines:
+    match = DIFF_RE.match(line)
+    if not match:
+      raise GitException('Failed to parse diff output: %s' % line)
+    entries.append(RawDiffEntry(*match.group(*_raw_diff_components)))
+
+  return entries
+
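+# Illustrative example of the raw diff format parsed above: a rename line like
+#
+#   :100644 100644 bcd1234 0123456 R86<TAB>old/name.py<TAB>new/name.py
+#
+# yields RawDiffEntry(src_mode='100644', dst_mode='100644', src_sha='bcd1234',
+# dst_sha='0123456', status='R', score='86', src_file='old/name.py',
+# dst_file='new/name.py').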
+
+def UploadCL(git_repo, remote, branch, local_branch='HEAD', draft=False,
+             reviewers=None, **kwargs):
+  """Upload a CL to gerrit. The CL should be checked out currently.
+
+  Args:
+    git_repo: Path to the git repository with the CL to upload checked out.
+    remote: The remote to upload the CL to.
+    branch: Branch to upload to.
+    local_branch: Branch to upload.
+    draft: Whether to upload as a draft.
+    reviewers: Add the reviewers to the CL.
+    kwargs: Extra options for GitPush. capture_output defaults to False so
+      that the URL for new or updated CLs is shown to the user.
+  """
+  ref = ('refs/drafts/%s' if draft else 'refs/for/%s') % branch
+  if reviewers:
+    reviewer_list = ['r=%s' % i for i in reviewers]
+    ref = ref + '%' + ','.join(reviewer_list)
+  remote_ref = RemoteRef(remote, ref)
+  kwargs.setdefault('capture_output', False)
+  kwargs.setdefault('stderr', subprocess.STDOUT)
+  return GitPush(git_repo, local_branch, remote_ref, **kwargs)
+
+
+def GitPush(git_repo, refspec, push_to, force=False, dry_run=False,
+            capture_output=True, skip=False, **kwargs):
+  """Wrapper for pushing to a branch.
+
+  Args:
+    git_repo: Git repository to act on.
+    refspec: The local ref to push to the remote.
+    push_to: A RemoteRef object representing the remote ref to push to.
+    force: Whether to bypass non-fastforward checks.
+    dry_run: If True, do everything except actually push the remote ref.
+    capture_output: Whether to capture output for this command.
+    skip: Log the git command that would have been run, but don't run it; this
+      avoids e.g. remote access checks that still apply to |dry_run|.
+  """
+  cmd = ['push', push_to.remote, '%s:%s' % (refspec, push_to.ref)]
+  if force:
+    cmd.append('--force')
+  if dry_run:
+    cmd.append('--dry-run')
+
+  if skip:
+    logging.info('Would have run "%s"', cmd)
+    return
+
+  return RunGit(git_repo, cmd, capture_output=capture_output,
+                **kwargs)
+
+
+# TODO(build): Switch callers of this function to use CreateBranch instead.
+def CreatePushBranch(branch, git_repo, sync=True, remote_push_branch=None):
+  """Create a local branch for pushing changes inside a repo repository.
+
+  Args:
+    branch: Local branch to create.
+    git_repo: Git repository to create the branch in.
+    sync: Update remote before creating push branch.
+    remote_push_branch: A RemoteRef to push to. i.e.,
+                        RemoteRef('cros', 'master').  By default it tries to
+                        automatically determine which tracking branch to use
+                        (see GetTrackingBranch()).
+  """
+  if not remote_push_branch:
+    remote_push_branch = GetTrackingBranch(git_repo, for_push=True)
+
+  if sync:
+    cmd = ['remote', 'update', remote_push_branch.remote]
+    RunGit(git_repo, cmd)
+
+  RunGit(git_repo, ['checkout', '-B', branch, '-t', remote_push_branch.ref])
+
+
+def SyncPushBranch(git_repo, remote, target, use_merge=False, **kwargs):
+  """Sync and rebase or merge a local push branch to the latest remote version.
+
+  Args:
+    git_repo: Git repository to rebase in.
+    remote: The remote returned by GetTrackingBranch(for_push=True)
+    target: The branch name returned by GetTrackingBranch().  Must
+      start with refs/remotes/ (specifically must be a proper remote
+      target rather than an ambiguous name).
+    use_merge: Default: False. If True, use merge to bring local branch up to
+      date with remote branch. Otherwise, use rebase.
+    kwargs: Arguments passed through to RunGit.
+  """
+  subcommand = 'merge' if use_merge else 'rebase'
+
+  if not target.startswith('refs/remotes/'):
+    raise Exception(
+        'Was asked to %s to a non branch target w/in the push pathways.  '
+        'This is highly indicative of an internal bug.  remote %s, %s %s'
+        % (subcommand, remote, subcommand, target))
+
+  cmd = ['remote', 'update', remote]
+  RunGit(git_repo, cmd, **kwargs)
+
+  try:
+    RunGit(git_repo, [subcommand, target], **kwargs)
+  except cros_build_lib.RunCommandError:
+    # Looks like our change conflicts with upstream. Cleanup our failed
+    # rebase.
+    RunGit(git_repo, [subcommand, '--abort'], check=False, **kwargs)
+    raise
+
+
+def PushBranch(branch, git_repo, dryrun=False,
+               staging_branch=None, auto_merge=False):
+  """General method to push local git changes.
+
+  This method only works with branches created via the CreatePushBranch
+  function.
+
+  Args:
+    branch: Local branch to push.  Branch should have already been created
+      with a local change committed ready to push to the remote branch.  Must
+      also already be checked out to that branch.
+    git_repo: Git repository to push from.
+    dryrun: Git push --dry-run if set to True.
+    staging_branch: Push change commits to the staging_branch if it's not None
+    auto_merge: Enable Gerrit's auto-merge feature. See here for more info:
+      https://gerrit-review.googlesource.com/Documentation/user-upload.html#auto_merge
+      Note: The setting must be enabled in Gerrit UI for the specific repo.
+
+  Raises:
+    cros_build_lib.RunCommandError if the push was unsuccessful.
+  """
+  remote_ref = GetTrackingBranch(git_repo, branch, for_checkout=False,
+                                 for_push=True)
+  # Don't like invoking this twice, but there is a bit of API
+  # impedance here; cros_mark_as_stable
+  local_ref = GetTrackingBranch(git_repo, branch, for_push=True)
+
+  if not remote_ref.ref.startswith('refs/heads/'):
+    raise Exception('Was asked to push to a non branch namespace: %s' %
+                    remote_ref.ref)
+
+  if auto_merge:
+    remote_ref = RemoteRef(remote=remote_ref.remote,
+                           ref=remote_ref.ref.replace(
+                               'heads', 'for', 1) + '%notify=NONE,submit',
+                           project_name=remote_ref.project_name)
+  # reference = staging_branch if staging_branch is not None else remote_ref.ref
+  if staging_branch is not None:
+    remote_ref = remote_ref._replace(ref=staging_branch)
+
+  logging.debug('Trying to push %s to %s:%s',
+                git_repo, branch, remote_ref.ref)
+
+  SyncPushBranch(git_repo, remote_ref.remote, local_ref.ref)
+
+  GitPush(git_repo, branch, remote_ref, skip=dryrun, print_cmd=True,
+          debug_level=logging.DEBUG)
+
+  logging.info('Successfully pushed %s to %s %s:%s',
+               git_repo, remote_ref.remote, branch, remote_ref.ref)
+
+
+def CleanAndDetachHead(git_repo):
+  """Remove all local changes and checkout a detached head.
+
+  Args:
+    git_repo: Directory of git repository.
+  """
+  RunGit(git_repo, ['am', '--abort'], check=False)
+  RunGit(git_repo, ['rebase', '--abort'], check=False)
+  RunGit(git_repo, ['clean', '-dfx'])
+  RunGit(git_repo, ['checkout', '--detach', '-f', 'HEAD'])
+
+
+def CleanAndCheckoutUpstream(git_repo, refresh_upstream=True):
+  """Remove all local changes and checkout the latest origin.
+
+  All local changes in the supplied repo will be removed. The branch will
+  also be switched to a detached head pointing at the latest origin.
+
+  Args:
+    git_repo: Directory of git repository.
+    refresh_upstream: If True, run a remote update prior to checking it out.
+  """
+  remote_ref = GetTrackingBranch(git_repo, for_push=refresh_upstream)
+  CleanAndDetachHead(git_repo)
+  if refresh_upstream:
+    RunGit(git_repo, ['remote', 'update', remote_ref.remote])
+  RunGit(git_repo, ['checkout', remote_ref.ref])
+
+
+def GetChromiteTrackingBranch():
+  """Returns the remote branch associated with chromite."""
+  cwd = os.path.dirname(os.path.realpath(__file__))
+  result_ref = GetTrackingBranch(cwd, for_checkout=False, fallback=False)
+  if result_ref:
+    branch = result_ref.ref
+    if branch.startswith('refs/heads/'):
+      # Normal scenario.
+      return StripRefsHeads(branch)
+    # Reaching here means it was refs/remotes/m/blah, or just plain invalid,
+    # or that we're on a detached head in a repo not managed by chromite.
+
+  # Manually try the manifest next.
+  try:
+    manifest = ManifestCheckout.Cached(cwd)
+    # Ensure the manifest knows of this checkout.
+    if manifest.FindCheckoutFromPath(cwd, strict=False):
+      return manifest.manifest_branch
+  except EnvironmentError as e:
+    if e.errno != errno.ENOENT:
+      raise
+
+  # Not a manifest checkout.
+  logging.notice(
+      "Chromite checkout at %s isn't controlled by repo, nor is it on a "
+      'branch (or if it is, the tracking configuration is missing or broken).  '
+      'Falling back to assuming the chromite checkout is derived from '
+      "'master'; this *may* result in breakage." % cwd)
+  return 'master'
+
+
+def GarbageCollection(git_repo, prune_all=False):
+  """Cleanup unnecessary files and optimize the local repository.
+
+  Args:
+    git_repo: Directory of git repository.
+    prune_all: If True, prune all loose objects regardless of gc.pruneExpire.
+  """
+  # Use --auto so it only runs if housekeeping is necessary.
+  cmd = ['gc', '--auto']
+  if prune_all:
+    cmd.append('--prune=all')
+  RunGit(git_repo, cmd)
+
+
+def DeleteStaleLocks(git_repo):
+  """Clean up stale locks left behind in a git repo.
+
+  This might occur if an earlier git command was killed during an operation.
+  Warning: This is dangerous because these locks are intended to prevent
+  corruption. Only use this if you are sure that no other git process is
+  accessing the repo (such as at the beginning of a fresh build).
+
+  Args"
+    git_repo: Directory of git repository.
+  """
+  git_gitdir = GetGitGitdir(git_repo)
+  if not git_gitdir:
+    raise GitException('Not a valid git repo: %s' % git_repo)
+
+  for root, _, filenames in os.walk(git_gitdir):
+    for filename in fnmatch.filter(filenames, '*.lock'):
+      p = os.path.join(root, filename)
+      logging.info('Found stale git lock, removing: %s', p)
+      os.remove(p)
diff --git a/utils/frozen_chromite/lib/gob_util.py b/utils/frozen_chromite/lib/gob_util.py
new file mode 100644
index 0000000..eb33abb
--- /dev/null
+++ b/utils/frozen_chromite/lib/gob_util.py
@@ -0,0 +1,856 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utilities for requesting information for a gerrit server via https.
+
+https://gerrit-review.googlesource.com/Documentation/rest-api.html
+"""
+
+from __future__ import print_function
+
+import datetime
+import json
+import os
+import re
+import socket
+import sys
+import warnings
+
+import httplib2
+try:
+  from oauth2client import gce
+except ImportError:  # Newer oauth2client versions put it in .contrib
+  # pylint: disable=import-error,no-name-in-module
+  from oauth2client.contrib import gce
+import six
+from six.moves import html_parser as HTMLParser
+from six.moves import http_client as httplib
+from six.moves import http_cookiejar as cookielib
+from six.moves import urllib
+
+from autotest_lib.utils.frozen_chromite.lib import auth
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import git
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.utils import memoize
+
+
+_GAE_VERSION = 'GAE_VERSION'
+
+
+class ErrorParser(HTMLParser.HTMLParser):
+  """Class to parse GOB error message reported as HTML.
+
+  Only data inside <div id='af-error-container'> section is retrieved from the
+  GOB error message. Retrieved data is processed as follows:
+
+  - newlines are removed
+  - each <br> tag is replaced with '\n'
+  - each <p> tag is replaced with '\n\n'
+  """
+
+  def __init__(self):
+    HTMLParser.HTMLParser.__init__(self)
+    self.in_div = False
+    self.err_data = ''
+
+  def handle_starttag(self, tag, attrs):
+    tag_id = [x[1] for x in attrs if x[0] == 'id']
+    if tag == 'div' and tag_id and tag_id[0] == 'af-error-container':
+      self.in_div = True
+      return
+
+    if self.in_div:
+      if tag == 'p':
+        self.err_data += '\n\n'
+        return
+
+      if tag == 'br':
+        self.err_data += '\n'
+        return
+
+  def handle_endtag(self, tag):
+    if tag == 'div':
+      self.in_div = False
+
+  def handle_data(self, data):
+    if self.in_div:
+      self.err_data += data.replace('\n', '')
+
+  def ParsedDiv(self):
+    return self.err_data.strip()
+
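+# Illustrative usage sketch (the HTML snippet is hypothetical):
+#
+#   ep = ErrorParser()
+#   ep.feed("<div id='af-error-container'><p>Over quota.</p></div>")
+#   ep.close()
+#   ep.ParsedDiv()  # -> 'Over quota.'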
+
[email protected]
+def _GetAppCredentials():
+  """Returns the singleton Appengine credentials for gerrit code review."""
+  return gce.AppAssertionCredentials(
+      scope='https://www.googleapis.com/auth/gerritcodereview')
+
+
+TRY_LIMIT = 11
+SLEEP = 0.5
+REQUEST_TIMEOUT_SECONDS = 120  # 2 minutes.
+
+# Controls the transport protocol used to communicate with Gerrit servers using
+# git. This is parameterized primarily to enable cros_test_lib.GerritTestCase.
+GIT_PROTOCOL = 'https'
+
+# The GOB conflict errors which could be ignorable.
+GOB_CONFLICT_ERRORS = (
+    br'change is closed',
+    br'Cannot reduce vote on labels for closed change',
+)
+
+GOB_CONFLICT_ERRORS_RE = re.compile(br'|'.join(GOB_CONFLICT_ERRORS),
+                                    re.IGNORECASE)
+
+GOB_ERROR_REASON_CLOSED_CHANGE = 'CLOSED CHANGE'
+
+
+class GOBError(Exception):
+  """Exception class for errors commuicating with the gerrit-on-borg service."""
+  def __init__(self, http_status=None, reason=None):
+    self.http_status = http_status
+    self.reason = reason
+
+    message = ''
+    if http_status is not None:
+      message += '(http_status): %d' % (http_status,)
+    if reason is not None:
+      message += '(reason): %s' % (reason,)
+    if not message:
+      message = 'Unknown error'
+
+    super(GOBError, self).__init__(message)
+
+
+class InternalGOBError(GOBError):
+  """Exception class for GOB errors with status >= 500"""
+
+
+def _QueryString(param_dict, first_param=None):
+  """Encodes query parameters in the key:val[+key:val...] format specified here:
+
+  https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
+  """
+  q = [urllib.parse.quote(first_param)] if first_param else []
+  q.extend(['%s:%s' % (key, val) for key, val in param_dict.items()])
+  return '+'.join(q)
+
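+# Illustrative example of the encoding above:
+#
+#   _QueryString({'status': 'open'}, first_param='Iabc123')
+#   # -> 'Iabc123+status:open'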
+
+def GetCookies(host, path, cookie_paths=None):
+  """Returns cookies that should be set on a request.
+
+  Used by CreateHttpConn for any requests that do not already specify a Cookie
+  header. All requests made by this library are HTTPS.
+
+  Args:
+    host: The hostname of the Gerrit service.
+    path: The path on the Gerrit service, already including /a/ if applicable.
+    cookie_paths: Files to look in for cookies. Defaults to looking in the
+      standard places where GoB places cookies.
+
+  Returns:
+    A dict of cookie name to value, with no URL encoding applied.
+  """
+  cookies = {}
+  if cookie_paths is None:
+    cookie_paths = (constants.GOB_COOKIE_PATH, constants.GITCOOKIES_PATH)
+  for cookie_path in cookie_paths:
+    if os.path.isfile(cookie_path):
+      with open(cookie_path) as f:
+        for line in f:
+          fields = line.strip().split('\t')
+          if line.strip().startswith('#') or len(fields) != 7:
+            continue
+          domain, xpath, key, value = fields[0], fields[2], fields[5], fields[6]
+          if cookielib.domain_match(host, domain) and path.startswith(xpath):
+            cookies[key] = value
+  return cookies
+
+
+def CreateHttpConn(host, path, reqtype='GET', headers=None, body=None):
+  """Opens an https connection to a gerrit service, and sends a request."""
+  path = '/a/' + path.lstrip('/')
+  headers = headers or {}
+  if _InAppengine():
+    # TODO(phobbs) how can we choose to only run this on GCE / AppEngine?
+    credentials = _GetAppCredentials()
+    try:
+      headers.setdefault(
+          'Authorization',
+          'Bearer %s' % credentials.get_access_token().access_token)
+    except gce.HttpAccessTokenRefreshError as e:
+      logging.debug('Failed to retrieve gce access token: %s', e)
+    except httplib2.ServerNotFoundError:
+      # Not in an Appengine or GCE environment.
+      pass
+
+  cookies = GetCookies(host, path)
+  if 'Cookie' not in headers and cookies:
+    headers['Cookie'] = '; '.join('%s=%s' % (n, v) for n, v in cookies.items())
+  elif 'Authorization' not in headers:
+    try:
+      git_creds = auth.GitCreds()
+    except auth.AccessTokenError:
+      git_creds = None
+    if git_creds:
+      headers.setdefault('Authorization', 'Bearer %s' % git_creds)
+    else:
+      logging.debug(
+          'No gitcookies file, Appengine credentials, or LUCI git creds found.')
+
+  if 'User-Agent' not in headers:
+    # We may not be in a git repository.
+    try:
+      version = git.GetGitRepoRevision(
+          os.path.dirname(os.path.realpath(__file__)))
+    except cros_build_lib.RunCommandError:
+      version = 'unknown'
+    headers['User-Agent'] = ' '.join((
+        'autotest.chromite.lib.gob_util',
+        os.path.basename(sys.argv[0]),
+        version,
+    ))
+
+  if body:
+    body = json.JSONEncoder().encode(body)
+    headers.setdefault('Content-Type', 'application/json')
+  if logging.getLogger().isEnabledFor(logging.DEBUG):
+    logging.debug('%s https://%s%s', reqtype, host, path)
+    for key, val in headers.items():
+      if key.lower() in ('authorization', 'cookie'):
+        val = 'HIDDEN'
+      logging.debug('%s: %s', key, val)
+    if body:
+      logging.debug(body)
+  conn = httplib.HTTPSConnection(host)
+  conn.req_host = host
+  conn.req_params = {
+      'url': path,
+      'method': reqtype,
+      'headers': headers,
+      'body': body,
+  }
+  conn.request(**conn.req_params)
+  return conn
+
+
+def _InAppengine():
+  """Returns whether we're in the Appengine environment."""
+  return _GAE_VERSION in os.environ
+
+
+def FetchUrl(host, path, reqtype='GET', headers=None, body=None,
+             ignore_204=False, ignore_404=True):
+  """Fetches the http response from the specified URL.
+
+  Args:
+    host: The hostname of the Gerrit service.
+    path: The path on the Gerrit service. This will be prefixed with '/a'
+          automatically.
+    reqtype: The request type. Can be GET or POST.
+    headers: A mapping of extra HTTP headers to pass in with the request.
+    body: A string of data to send after the headers are finished.
+    ignore_204: for some requests gerrit-on-borg will return 204 to confirm
+                proper processing of the request. When processing responses to
+                these requests we should expect this status.
+    ignore_404: For many requests, gerrit-on-borg will return 404 if the request
+                doesn't match the database contents.  In most such cases, we
+                want the API to return None rather than raise an Exception.
+
+  Returns:
+    The connection's reply, as bytes.
+  """
+  @timeout_util.TimeoutDecorator(REQUEST_TIMEOUT_SECONDS)
+  def _FetchUrlHelper():
+    err_prefix = 'A transient error occurred while querying %s:\n' % (host,)
+    try:
+      conn = CreateHttpConn(host, path, reqtype=reqtype, headers=headers,
+                            body=body)
+      response = conn.getresponse()
+    except socket.error as ex:
+      logging.warning('%s%s', err_prefix, str(ex))
+      raise
+
+    # Normal/good responses.
+    response_body = response.read()
+    if response.status == 204 and ignore_204:
+      # This exception is used to confirm expected response status.
+      raise GOBError(http_status=response.status, reason=response.reason)
+    if response.status == 404 and ignore_404:
+      return b''
+    elif response.status == 200:
+      return response_body
+
+    # Bad responses.
+    logging.debug('response msg:\n%s', response.msg)
+    http_version = 'HTTP/%s' % ('1.1' if response.version == 11 else '1.0')
+    msg = ('%s %s %s\n%s %d %s\nResponse body: %r' %
+           (reqtype, conn.req_params['url'], http_version,
+            http_version, response.status, response.reason,
+            response_body))
+
+    # Ones we can retry.
+    if response.status >= 500:
+      # A status >=500 is assumed to be a possible transient error; retry.
+      logging.warning('%s%s', err_prefix, msg)
+      raise InternalGOBError(
+          http_status=response.status,
+          reason=response.reason)
+
+    # Ones we cannot retry.
+    home = os.environ.get('HOME', '~')
+    url = 'https://%s/new-password' % host
+    if response.status in (302, 303, 307):
+      err_prefix = ('Redirect found; missing/bad %s/.gitcookies credentials or '
+                    'permissions (0600)?\n See %s' % (home, url))
+    elif response.status in (400,):
+      err_prefix = 'Permission error; talk to the admins of the GoB instance'
+    elif response.status in (401,):
+      err_prefix = ('Authorization error; missing/bad %s/.gitcookies '
+                    'credentials or permissions (0600)?\n See %s' % (home, url))
+    elif response.status in (422,):
+      err_prefix = ('Bad request body?')
+
+    logging.warning(err_prefix)
+
+    # If GOB output contained expected error message, reduce log visibility of
+    # raw GOB output reported below.
+    ep = ErrorParser()
+    ep.feed(response_body.decode('utf-8'))
+    ep.close()
+    parsed_div = ep.ParsedDiv()
+    if parsed_div:
+      logging.warning('GOB Error:\n%s', parsed_div)
+      logging_function = logging.debug
+    else:
+      logging_function = logging.warning
+
+    logging_function(msg)
+    if response.status >= 400:
+      # The 'X-ErrorId' header is set only on >= 400 response code.
+      logging_function('X-ErrorId: %s', response.getheader('X-ErrorId'))
+
+    try:
+      logging.warning('conn.sock.getpeername(): %s', conn.sock.getpeername())
+    except AttributeError:
+      logging.warning('peer name unavailable')
+
+    if response.status == httplib.CONFLICT:
+      # 409 conflict
+      if GOB_CONFLICT_ERRORS_RE.search(response_body):
+        raise GOBError(
+            http_status=response.status,
+            reason=GOB_ERROR_REASON_CLOSED_CHANGE)
+      else:
+        raise GOBError(http_status=response.status, reason=response.reason)
+    else:
+      raise GOBError(http_status=response.status, reason=response.reason)
+
+  return retry_util.RetryException(
+      (socket.error, InternalGOBError, timeout_util.TimeoutError),
+      TRY_LIMIT,
+      _FetchUrlHelper, sleep=SLEEP, backoff_factor=2)
+
+
+def FetchUrlJson(*args, **kwargs):
+  """Fetch the specified URL and parse it as JSON.
+
+  See FetchUrl for arguments.
+  """
+  fh = FetchUrl(*args, **kwargs)
+
+  # In case ignore_404 is True, we want to return None instead of
+  # raising an exception.
+  if not fh:
+    return None
+
+  # The first line of the response should always be: )]}'
+  if not fh.startswith(b")]}'"):
+    raise GOBError(http_status=200, reason='Unexpected json output: %r' % fh)
+
+  _, _, json_data = fh.partition(b'\n')
+  return json.loads(json_data)
+
+
+def QueryChanges(host, param_dict, first_param=None, limit=None, o_params=None,
+                 start=None):
+  """Queries a gerrit-on-borg server for changes matching query terms.
+
+  Args:
+    host: The Gerrit server hostname.
+    param_dict: A dictionary of search parameters, as documented here:
+        https://gerrit-review.googlesource.com/Documentation/user-search.html
+    first_param: A change identifier
+    limit: Maximum number of results to return.
+    o_params: A list of additional output specifiers, as documented here:
+        https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#list-changes
+    start: Offset in the result set to start at.
+
+  Returns:
+    A list of json-decoded query results.
+  """
+  # Note that no attempt is made to escape special characters; YMMV.
+  if not param_dict and not first_param:
+    raise RuntimeError('QueryChanges requires search parameters')
+  path = 'changes/?q=%s' % _QueryString(param_dict, first_param)
+  if start:
+    path = '%s&S=%d' % (path, start)
+  if limit:
+    path = '%s&n=%d' % (path, limit)
+  if o_params:
+    path = '%s&%s' % (path, '&'.join(['o=%s' % p for p in o_params]))
+  # Don't ignore 404; a query should always return a list, even if it's empty.
+  return FetchUrlJson(host, path, ignore_404=False)
+
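+# Illustrative usage sketch (host and project are hypothetical):
+#
+#   changes = QueryChanges('review.example.com',
+#                          {'project': 'example/repo', 'status': 'open'},
+#                          limit=10, o_params=['CURRENT_REVISION'])
+#   # changes is a list of json-decoded ChangeInfo dicts, e.g.
+#   # changes[0]['_number'].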
+
+def MultiQueryChanges(host, param_dict, change_list, limit=None, o_params=None,
+                      start=None):
+  """Initiate a query composed of multiple sets of query parameters."""
+  if not change_list:
+    raise RuntimeError(
+        "MultiQueryChanges requires a list of change numbers/id's")
+  q = ['q=%s' % '+OR+'.join(urllib.parse.quote(str(x)) for x in change_list)]
+  if param_dict:
+    q.append(_QueryString(param_dict))
+  if limit:
+    q.append('n=%d' % limit)
+  if start:
+    q.append('S=%s' % start)
+  if o_params:
+    q.extend(['o=%s' % p for p in o_params])
+  path = 'changes/?%s' % '&'.join(q)
+  try:
+    result = FetchUrlJson(host, path, ignore_404=False)
+  except GOBError as e:
+    msg = '%s:\n%s' % (e, path)
+    raise GOBError(http_status=e.http_status, reason=msg)
+  return result
+
+
+def GetGerritFetchUrl(host):
+  """Given a gerrit host name returns URL of a gerrit instance to fetch from."""
+  return 'https://%s/' % host
+
+
+def GetChangePageUrl(host, change_number):
+  """Given a gerrit host name and change number, return change page url."""
+  return 'https://%s/#/c/%d/' % (host, change_number)
+
+
+def _GetChangePath(change):
+  """Given a change id, return a path prefix for the change."""
+  return 'changes/%s' % str(change).replace('/', '%2F')
+
+
+def GetChangeUrl(host, change):
+  """Given a gerrit host name and change id, return an url for the change."""
+  return 'https://%s/a/%s' % (host, _GetChangePath(change))
+
+
+def GetChange(host, change):
+  """Query a gerrit server for information about a single change."""
+  return FetchUrlJson(host, _GetChangePath(change))
+
+
+def GetChangeReview(host, change, revision=None):
+  """Get the current review information for a change."""
+  if revision is None:
+    revision = 'current'
+  path = '%s/revisions/%s/review' % (_GetChangePath(change), revision)
+  return FetchUrlJson(host, path)
+
+
+def GetChangeCommit(host, change, revision=None):
+  """Get the current review information for a change."""
+  if revision is None:
+    revision = 'current'
+  path = '%s/revisions/%s/commit' % (_GetChangePath(change), revision)
+  return FetchUrlJson(host, path)
+
+
+def GetChangeCurrentRevision(host, change):
+  """Get information about the latest revision for a given change."""
+  jmsg = GetChangeReview(host, change)
+  if jmsg:
+    return jmsg.get('current_revision')
+
+
+def GetChangeDetail(host, change, o_params=None):
+  """Query a gerrit server for extended information about a single change."""
+  path = '%s/detail' % _GetChangePath(change)
+  if o_params:
+    path = '%s?%s' % (path, '&'.join(['o=%s' % p for p in o_params]))
+  return FetchUrlJson(host, path)
+
+
+def GetChangeReviewers(host, change):
+  """Get information about all reviewers attached to a change.
+
+  Args:
+    host: The Gerrit host to interact with.
+    change: The Gerrit change ID.
+  """
+  warnings.warn('GetChangeReviewers is deprecated; use GetReviewers instead.')
+  return GetReviewers(host, change)
+
+
+def ReviewedChange(host, change):
+  """Mark a gerrit change as reviewed."""
+  path = '%s/reviewed' % _GetChangePath(change)
+  return FetchUrlJson(host, path, reqtype='PUT', ignore_404=False)
+
+
+def UnreviewedChange(host, change):
+  """Mark a gerrit change as unreviewed."""
+  path = '%s/unreviewed' % _GetChangePath(change)
+  return FetchUrlJson(host, path, reqtype='PUT', ignore_404=False)
+
+
+def IgnoreChange(host, change):
+  """Ignore a gerrit change."""
+  path = '%s/ignore' % _GetChangePath(change)
+  return FetchUrlJson(host, path, reqtype='PUT', ignore_404=False)
+
+
+def UnignoreChange(host, change):
+  """Unignore a gerrit change."""
+  path = '%s/unignore' % _GetChangePath(change)
+  return FetchUrlJson(host, path, reqtype='PUT', ignore_404=False)
+
+
+def AbandonChange(host, change, msg=''):
+  """Abandon a gerrit change."""
+  path = '%s/abandon' % _GetChangePath(change)
+  body = {'message': msg}
+  return FetchUrlJson(host, path, reqtype='POST', body=body, ignore_404=False)
+
+
+def RestoreChange(host, change, msg=''):
+  """Restore a previously abandoned change."""
+  path = '%s/restore' % _GetChangePath(change)
+  body = {'message': msg}
+  return FetchUrlJson(host, path, reqtype='POST', body=body, ignore_404=False)
+
+
+def DeleteDraft(host, change):
+  """Delete a gerrit draft change."""
+  path = _GetChangePath(change)
+  try:
+    FetchUrl(host, path, reqtype='DELETE', ignore_204=True, ignore_404=False)
+  except GOBError as e:
+    # On success, gerrit returns status 204; anything else is an error.
+    if e.http_status != 204:
+      raise
+  else:
+    raise GOBError(
+        http_status=200,
+        reason='Unexpectedly received a 200 http status while deleting draft '
+               '%r' % change)
+
+
+def SubmitChange(host, change, revision=None, wait_for_merge=True):
+  """Submits a gerrit change via Gerrit."""
+  if revision is None:
+    revision = 'current'
+  path = '%s/revisions/%s/submit' % (_GetChangePath(change), revision)
+  body = {'wait_for_merge': wait_for_merge}
+  return FetchUrlJson(host, path, reqtype='POST', body=body, ignore_404=False)
+
+
+def CheckChange(host, change, sha1=None):
+  """Performs consistency checks on the change, and fixes inconsistencies.
+
+  This is useful for forcing Gerrit to check whether a change has already been
+  merged into the git repo. Namely, if |sha1| is provided and the change is in
+  'NEW' status, Gerrit will check if a change with that |sha1| is in the repo
+  and mark the change as 'MERGED' if it exists.
+
+  Args:
+    host: The Gerrit host to interact with.
+    change: The Gerrit change ID.
+    sha1: An optional hint of the commit's SHA1 in Git.
+  """
+  path = '%s/check' % (_GetChangePath(change),)
+  if sha1:
+    body, headers = {'expect_merged_as': sha1}, {}
+  else:
+    body, headers = {}, {'Content-Length': '0'}
+
+  return FetchUrlJson(host, path, reqtype='POST',
+                      body=body, ignore_404=False,
+                      headers=headers)
+
+
+def GetAssignee(host, change):
+  """Get assignee for a change."""
+  path = '%s/assignee' % _GetChangePath(change)
+  return FetchUrlJson(host, path)
+
+
+def AddAssignee(host, change, assignee):
+  """Add reviewers to a change.
+
+  Args:
+    host: The Gerrit host to interact with.
+    change: The Gerrit change ID.
+    assignee: Gerrit account email as a string
+  """
+  path = '%s/assignee' % _GetChangePath(change)
+  body = {'assignee': assignee}
+  return FetchUrlJson(host, path, reqtype='PUT', body=body, ignore_404=False)
+
+
+def MarkPrivate(host, change):
+  """Marks the given CL as private.
+
+  Args:
+    host: The gob host to interact with.
+    change: CL number on the given host.
+  """
+  path = '%s/private' % _GetChangePath(change)
+  try:
+    FetchUrlJson(host, path, reqtype='POST', ignore_404=False)
+  except GOBError as e:
+    # 201: created -- change was successfully marked private.
+    if e.http_status != 201:
+      raise
+  else:
+    raise GOBError(
+        http_status=200,
+        reason='Change was already marked private',
+    )
+
+
+def MarkNotPrivate(host, change):
+  """Sets the private bit on given CL to False.
+
+  Args:
+    host: The gob host to interact with.
+    change: CL number on the given host.
+  """
+  path = '%s/private.delete' % _GetChangePath(change)
+  try:
+    FetchUrlJson(host, path, reqtype='POST', ignore_404=False, ignore_204=True)
+  except GOBError as e:
+    if e.http_status == 204:
+      # 204: no content -- change was successfully marked not private.
+      pass
+    elif e.http_status == 409:
+      raise GOBError(
+          http_status=e.http_status,
+          reason='Change was already marked not private',
+      )
+    else:
+      raise
+  else:
+    raise GOBError(
+        http_status=200,
+        reason='Got unexpected 200 when marking change not private.',
+    )
+
+
+def GetReviewers(host, change):
+  """Get information about all reviewers attached to a change.
+
+  Args:
+    host: The Gerrit host to interact with.
+    change: The Gerrit change ID.
+  """
+  path = '%s/reviewers' % _GetChangePath(change)
+  return FetchUrlJson(host, path)
+
+
+def AddReviewers(host, change, add=None, notify=None):
+  """Add reviewers to a change."""
+  if not add:
+    return
+  if isinstance(add, six.string_types):
+    add = (add,)
+  body = {}
+  if notify:
+    body['notify'] = notify
+  path = '%s/reviewers' % _GetChangePath(change)
+  for r in add:
+    body['reviewer'] = r
+    jmsg = FetchUrlJson(host, path, reqtype='POST', body=body, ignore_404=False)
+  return jmsg
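+# Illustrative usage only (a sketch; the host, change number, and address are
+# placeholders, and 'OWNER' is assumed to be one of Gerrit's notify values):
+#   AddReviewers('chromium-review.googlesource.com', 12345,
+#                add=('[email protected]',), notify='OWNER')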
+
+
+def RemoveReviewers(host, change, remove=None, notify=None):
+  """Remove reveiewers from a change."""
+  if not remove:
+    return
+  if isinstance(remove, six.string_types):
+    remove = (remove,)
+  body = {}
+  if notify:
+    body['notify'] = notify
+  for r in remove:
+    path = '%s/reviewers/%s/delete' % (_GetChangePath(change), r)
+    try:
+      FetchUrl(host, path, reqtype='POST', body=body, ignore_204=True)
+    except GOBError as e:
+      # On success, gerrit returns status 204; anything else is an error.
+      if e.http_status != 204:
+        raise
+
+
+def SetReview(host, change, revision=None, msg=None, labels=None, notify=None):
+  """Set labels and/or add a message to a code review."""
+  if revision is None:
+    revision = 'current'
+  if not msg and not labels:
+    return
+  path = '%s/revisions/%s/review' % (_GetChangePath(change), revision)
+  body = {}
+  if msg:
+    body['message'] = msg
+  if labels:
+    body['labels'] = labels
+  if notify:
+    body['notify'] = notify
+  response = FetchUrlJson(host, path, reqtype='POST', body=body)
+  if response is None:
+    raise GOBError(
+        http_status=404,
+        reason='CL %s not found in %s' % (change, host))
+  if labels:
+    for key, val in labels.items():
+      if ('labels' not in response or key not in response['labels'] or
+          int(response['labels'][key]) != int(val)):
+        raise GOBError(
+            http_status=200,
+            reason='Unable to set "%s" label on change %s.' % (key, change))
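+# Illustrative usage only (a sketch; the host, change number, and label values
+# are placeholders):
+#   SetReview('chromium-review.googlesource.com', 12345,
+#             msg='Looks good.', labels={'Code-Review': 1})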
+
+
+def SetTopic(host, change, topic):
+  """Set |topic| for a change. If |topic| is empty, it will be deleted"""
+  path = '%s/topic' % _GetChangePath(change)
+  body = {'topic': topic}
+  return FetchUrlJson(host, path, reqtype='PUT', body=body, ignore_404=False)
+
+
+def SetHashtags(host, change, add, remove):
+  """Adds and / or removes hashtags from a change.
+
+  Args:
+    host: Hostname (without protocol prefix) of the gerrit server.
+    change: A gerrit change number.
+    add: a list of hashtags to be added.
+    remove: a list of hashtags to be removed.
+  """
+  path = '%s/hashtags' % _GetChangePath(change)
+  return FetchUrlJson(host, path, reqtype='POST',
+                      body={'add': add, 'remove': remove},
+                      ignore_404=False)
+
+
+def ResetReviewLabels(host, change, label, value='0', revision=None,
+                      message=None, notify=None):
+  """Reset the value of a given label for all reviewers on a change."""
+  if revision is None:
+    revision = 'current'
+  # This is tricky when working on the "current" revision, because there's
+  # always the risk that the "current" revision will change in between API
+  # calls.  So, the code dereferences the "current" revision down to a literal
+  # sha1 at the beginning and uses it for all subsequent calls.  As a sanity
+  # check, the "current" revision is dereferenced again at the end, and if it
+  # differs from the previous "current" revision, an exception is raised.
+  current = (revision == 'current')
+  jmsg = GetChangeDetail(
+      host, change, o_params=['CURRENT_REVISION', 'CURRENT_COMMIT'])
+  if current:
+    revision = jmsg['current_revision']
+  value = str(value)
+  path = '%s/revisions/%s/review' % (_GetChangePath(change), revision)
+  message = message or (
+      '%s label set to %s programmatically by chromite.' % (label, value))
+  for review in jmsg.get('labels', {}).get(label, {}).get('all', []):
+    if str(review.get('value', value)) != value:
+      body = {
+          'message': message,
+          'labels': {label: value},
+          'on_behalf_of': review['_account_id'],
+      }
+      if notify:
+        body['notify'] = notify
+      response = FetchUrlJson(host, path, reqtype='POST', body=body)
+      if str(response['labels'][label]) != value:
+        username = review.get('email', jmsg.get('name', ''))
+        raise GOBError(
+            http_status=200,
+            reason='Unable to set %s label for user "%s" on change %s.' % (
+                label, username, change))
+  if current:
+    new_revision = GetChangeCurrentRevision(host, change)
+    if not new_revision:
+      raise GOBError(
+          http_status=200,
+          reason='Could not get review information for change "%s"' % change)
+    elif new_revision != revision:
+      raise GOBError(
+          http_status=200,
+          reason='While resetting labels on change "%s", a new patchset was '
+                 'uploaded.' % change)
+
+
+def GetTipOfTrunkRevision(git_url):
+  """Returns the current git revision on the master branch."""
+  parsed_url = urllib.parse.urlparse(git_url)
+  path = parsed_url[2].rstrip('/') + '/+log/master?n=1&format=JSON'
+  j = FetchUrlJson(parsed_url[1], path, ignore_404=False)
+  if not j:
+    raise GOBError(
+        reason='Could not find revision information from %s' % git_url)
+  try:
+    return j['log'][0]['commit']
+  except (IndexError, KeyError, TypeError):
+    msg = ('The json returned by https://%s%s has an unfamiliar structure:\n'
+           '%s\n' % (parsed_url[1], path, j))
+    raise GOBError(reason=msg)
+
+
+def GetCommitDate(git_url, commit):
+  """Returns the date of a particular git commit.
+
+  The returned object is naive in the sense that it doesn't carry any timezone
+  information - you should assume UTC.
+
+  Args:
+    git_url: URL for the repository to get the commit date from.
+    commit: A git commit identifier (e.g. a sha1).
+
+  Returns:
+    A datetime object.
+  """
+  parsed_url = urllib.parse.urlparse(git_url)
+  path = '%s/+log/%s?n=1&format=JSON' % (parsed_url.path.rstrip('/'), commit)
+  j = FetchUrlJson(parsed_url.netloc, path, ignore_404=False)
+  if not j:
+    raise GOBError(
+        reason='Could not find revision information from %s' % git_url)
+  try:
+    commit_timestr = j['log'][0]['committer']['time']
+  except (IndexError, KeyError, TypeError):
+    msg = ('The json returned by https://%s%s has an unfamiliar structure:\n'
+           '%s\n' % (parsed_url.netloc, path, j))
+    raise GOBError(reason=msg)
+  try:
+    # We're parsing a string of the form 'Tue Dec 02 17:48:06 2014'.
+    return datetime.datetime.strptime(commit_timestr,
+                                      constants.GOB_COMMIT_TIME_FORMAT)
+  except ValueError:
+    raise GOBError(reason='Failed parsing commit time "%s"' % commit_timestr)
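+# Illustrative usage only (a sketch; the repository URL and sha1 are
+# placeholders):
+#   GetCommitDate('https://chromium.googlesource.com/chromiumos/platform2',
+#                 '<commit sha1>')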
+
+
+def GetAccount(host):
+  """Get information about the user account."""
+  return FetchUrlJson(host, 'accounts/self')
diff --git a/utils/frozen_chromite/lib/gs.py b/utils/frozen_chromite/lib/gs.py
new file mode 100644
index 0000000..f97f4d5
--- /dev/null
+++ b/utils/frozen_chromite/lib/gs.py
@@ -0,0 +1,1417 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Library to make common google storage operations more reliable."""
+
+from __future__ import print_function
+
+import collections
+import contextlib
+import datetime
+import errno
+import fnmatch
+import getpass
+import glob
+import hashlib
+import os
+import re
+import shutil
+import subprocess
+import tempfile
+
+import six
+from six.moves import urllib
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cache
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_collections
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import path_util
+from autotest_lib.utils.frozen_chromite.lib import retry_stats
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import signals
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
+
+
+# This bucket has the allAuthenticatedUsers:READER ACL.
+AUTHENTICATION_BUCKET = 'gs://chromeos-authentication-bucket/'
+
+# Public path, only really works for files.
+PUBLIC_BASE_HTTPS_URL = 'https://storage.googleapis.com/'
+
+# Private path for files.
+PRIVATE_BASE_HTTPS_URL = 'https://storage.cloud.google.com/'
+
+# Private path for directories.
+# TODO(akeshet): this is a workaround for b/27653354. If that is ultimately
+# fixed, revisit this workaround.
+PRIVATE_BASE_HTTPS_DOWNLOAD_URL = 'https://stainless.corp.google.com/browse/'
+BASE_GS_URL = 'gs://'
+
+# Format used by "gsutil ls -l" when reporting modified time.
+DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
+
+# Regexp for parsing each line of output from "gsutil ls -l".
+# This regexp is prepared for the generation and meta_generation values,
+# too, even though they are not expected until we use "-a".
+#
+# A detailed listing looks like:
+#    99908  2014-03-01T05:50:08Z  gs://bucket/foo/abc#1234  metageneration=1
+#                                 gs://bucket/foo/adir/
+#    99908  2014-03-04T01:16:55Z  gs://bucket/foo/def#5678  metageneration=1
+# TOTAL: 2 objects, 199816 bytes (495.36 KB)
+LS_LA_RE = re.compile(
+    r'^\s*(?P<content_length>\d*?)\s+'
+    r'(?P<creation_time>\S*?)\s+'
+    r'(?P<url>[^#$]+).*?'
+    r'('
+    r'#(?P<generation>\d+)\s+'
+    r'meta_?generation=(?P<metageneration>\d+)'
+    r')?\s*$')
+LS_RE = re.compile(r'^\s*(?P<content_length>)(?P<creation_time>)(?P<url>.*)'
+                   r'(?P<generation>)(?P<metageneration>)\s*$')
+
+# Format used by ContainsWildCard, which is duplicated from
+# https://github.com/GoogleCloudPlatform/gsutil/blob/v4.21/gslib/storage_url.py#L307.
+WILDCARD_REGEX = re.compile(r'[*?\[\]]')
+
+
+def PathIsGs(path):
+  """Determine if a path is a Google Storage URI."""
+  return path.startswith(BASE_GS_URL)
+
+
+def CanonicalizeURL(url, strict=False):
+  """Convert provided URL to gs:// URL, if it follows a known format.
+
+  Args:
+    url: URL to canonicalize.
+    strict: Raises exception if URL cannot be canonicalized.
+  """
+  for prefix in (PUBLIC_BASE_HTTPS_URL,
+                 PRIVATE_BASE_HTTPS_URL,
+                 PRIVATE_BASE_HTTPS_DOWNLOAD_URL,
+                 'https://pantheon.corp.google.com/storage/browser/',
+                 'https://commondatastorage.googleapis.com/'):
+    if url.startswith(prefix):
+      return url.replace(prefix, BASE_GS_URL, 1)
+
+  if not PathIsGs(url) and strict:
+    raise ValueError('Url %r cannot be canonicalized.' % url)
+
+  return url
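+# For example (a sketch, using a bucket name that appears elsewhere in this
+# module's comments):
+#   CanonicalizeURL('https://storage.googleapis.com/chromeos-image-archive/x')
+# would return 'gs://chromeos-image-archive/x'.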
+
+
+def GetGsURL(bucket, for_gsutil=False, public=True, suburl=''):
+  """Construct a Google Storage URL
+
+  Args:
+    bucket: The Google Storage bucket to use
+    for_gsutil: Do you want a URL for passing to `gsutil`?
+    public: Do we want the public or private url
+    suburl: A url fragment to tack onto the end
+
+  Returns:
+    The fully constructed URL
+  """
+  url = 'gs://%s/%s' % (bucket, suburl)
+
+  if for_gsutil:
+    return url
+  else:
+    return GsUrlToHttp(url, public=public)
+
+
+def GsUrlToHttp(path, public=True, directory=False):
+  """Convert a GS URL to a HTTP URL for the same resource.
+
+  Because the HTTP URLs are not fixed (and may not always be simple prefix
+  replacements), use this method to centralize the conversion.
+
+  Directories need to have different URLs from files, because the Web UIs for GS
+  are weird and really inconsistent. Also public directories probably
+  don't work, and probably never will (permissions as well as UI).
+
+  e.g. 'gs://chromeos-image-archive/path/file' ->
+       'https://pantheon/path/file'
+
+  Args:
+    path: GS URL to convert.
+    public: Is this URL for Googler access, or publicly visible?
+    directory: Force this URL to be treated as a directory?
+               We try to autodetect on False.
+
+  Returns:
+    https URL as a string.
+  """
+  assert PathIsGs(path)
+  directory = directory or path.endswith('/')
+
+  # Public HTTP URLs for directories don't work, hence the disabled check:
+  #   assert not public or not directory
+
+  if public:
+    return path.replace(BASE_GS_URL, PUBLIC_BASE_HTTPS_URL, 1)
+  else:
+    if directory:
+      return path.replace(BASE_GS_URL, PRIVATE_BASE_HTTPS_DOWNLOAD_URL, 1)
+    else:
+      return path.replace(BASE_GS_URL, PRIVATE_BASE_HTTPS_URL, 1)
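+# For example (a sketch): GsUrlToHttp('gs://chromeos-image-archive/path/file')
+# returns 'https://storage.googleapis.com/chromeos-image-archive/path/file';
+# with public=False it returns the storage.cloud.google.com equivalent.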
+
+
+class GSContextException(Exception):
+  """Base exception for all exceptions thrown by GSContext."""
+
+
+# Since the underlying code uses run, some callers might be trying to
+# catch cros_build_lib.RunCommandError themselves.  Extend that class so that
+# code continues to work.
+class GSCommandError(GSContextException, cros_build_lib.RunCommandError):
+  """Thrown when an error happened we couldn't decode."""
+
+
+class GSContextPreconditionFailed(GSContextException):
+  """Thrown when google storage returns code=PreconditionFailed."""
+
+
+class GSNoSuchKey(GSContextException):
+  """Thrown when google storage returns code=NoSuchKey."""
+
+
+# Detailed results of GSContext.Stat.
+#
+# The fields directly correspond to gsutil stat results.
+#
+#  Field name        Type         Example
+#   creation_time     datetime     Sat, 23 Aug 2014 06:53:20 GMT
+#   content_length    int          74
+#   content_type      string       application/octet-stream
+#   hash_crc32c       string       BBPMPA==
+#   hash_md5          string       ms+qSYvgI9SjXn8tW/5UpQ==
+#   etag              string       CNCgocbmqMACEAE=
+#   generation        int          1408776800850000
+#   metageneration    int          1
+#
+# Note: We omit a few stat fields as they are not always available, and we
+# have no callers that want this currently.
+#
+#   content_language  string/None  en   # This field may be None.
+GSStatResult = collections.namedtuple(
+    'GSStatResult',
+    ('creation_time', 'content_length', 'content_type', 'hash_crc32c',
+     'hash_md5', 'etag', 'generation', 'metageneration'))
+
+
+# Detailed results of GSContext.List.
+GSListResult = collections.namedtuple(
+    'GSListResult',
+    ('url', 'creation_time', 'content_length', 'generation', 'metageneration'))
+
+
+ErrorDetails = cros_collections.Collection(
+    'ErrorDetails',
+    type=None, message_pattern='', retriable=None, exception=None)
+
+
+class GSCounter(object):
+  """A counter class for Google Storage."""
+
+  def __init__(self, ctx, path):
+    """Create a counter object.
+
+    Args:
+      ctx: A GSContext object.
+      path: The path to the counter in Google Storage.
+    """
+    self.ctx = ctx
+    self.path = path
+
+  def Get(self):
+    """Get the current value of a counter."""
+    try:
+      return int(self.ctx.Cat(self.path))
+    except GSNoSuchKey:
+      return 0
+
+  def AtomicCounterOperation(self, default_value, operation):
+    """Atomically set the counter value using |operation|.
+
+    Args:
+      default_value: Default value to use for counter, if counter
+                     does not exist.
+      operation: Function that takes the current counter value as a
+                 parameter, and returns the new desired value.
+
+    Returns:
+      The new counter value. None if value could not be set.
+    """
+    generation, _ = self.ctx.GetGeneration(self.path)
+    for _ in range(self.ctx.retries + 1):
+      try:
+        value = default_value if generation == 0 else operation(self.Get())
+        self.ctx.Copy('-', self.path, input=str(value), version=generation)
+        return value
+      except (GSContextPreconditionFailed, GSNoSuchKey):
+        # GSContextPreconditionFailed is thrown if another builder is also
+        # trying to update the counter and we lost the race. GSNoSuchKey is
+        # thrown if another builder deleted the counter. In either case, fetch
+        # the generation again, and, if it has changed, try the copy again.
+        new_generation, _ = self.ctx.GetGeneration(self.path)
+        if new_generation == generation:
+          raise
+        generation = new_generation
+
+  def Increment(self):
+    """Increment the counter.
+
+    Returns:
+      The new counter value. None if value could not be set.
+    """
+    return self.AtomicCounterOperation(1, lambda x: x + 1)
+
+  def Decrement(self):
+    """Decrement the counter.
+
+    Returns:
+      The new counter value. None if value could not be set.
+    """
+    return self.AtomicCounterOperation(-1, lambda x: x - 1)
+
+  def Reset(self):
+    """Reset the counter to zero.
+
+    Returns:
+      The new counter value. None if value could not be set.
+    """
+    return self.AtomicCounterOperation(0, lambda x: 0)
+
+  def StreakIncrement(self):
+    """Increment the counter if it is positive, otherwise set it to 1.
+
+    Returns:
+      The new counter value. None if value could not be set.
+    """
+    return self.AtomicCounterOperation(1, lambda x: x + 1 if x > 0 else 1)
+
+  def StreakDecrement(self):
+    """Decrement the counter if it is negative, otherwise set it to -1.
+
+    Returns:
+      The new counter value. None if value could not be set.
+    """
+    return self.AtomicCounterOperation(-1, lambda x: x - 1 if x < 0 else -1)
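+# Illustrative usage only (a sketch; the counter path is a placeholder):
+#   ctx = GSContext()
+#   counter = ctx.Counter('gs://some-bucket/counters/build-attempts')
+#   counter.Increment()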
+
+
+class GSContext(object):
+  """A class to wrap common google storage operations."""
+
+  # Error messages that indicate an invalid BOTO config.
+  AUTHORIZATION_ERRORS = ('no configured', 'none configured',
+                          'detail=Authorization', '401 Anonymous caller')
+
+  DEFAULT_BOTO_FILE = os.path.expanduser('~/.boto')
+  DEFAULT_GSUTIL_TRACKER_DIR = os.path.expanduser('~/.gsutil/tracker-files')
+  # This is set for ease of testing.
+  DEFAULT_GSUTIL_BIN = None
+  DEFAULT_GSUTIL_BUILDER_BIN = '/b/build/third_party/gsutil/gsutil'
+  # How many times to retry uploads.
+  DEFAULT_RETRIES = 3
+
+  # Multiplier for how long to sleep (in seconds) between retries; will delay
+  # (1*sleep) the first time, then (2*sleep), continuing via attempt * sleep.
+  DEFAULT_SLEEP_TIME = 60
+
+  GSUTIL_VERSION = '4.51'
+  GSUTIL_TAR = 'gsutil_%s.tar.gz' % GSUTIL_VERSION
+  GSUTIL_URL = (PUBLIC_BASE_HTTPS_URL +
+                'chromeos-mirror/gentoo/distfiles/%s' % GSUTIL_TAR)
+  GSUTIL_API_SELECTOR = 'JSON'
+
+  RESUMABLE_UPLOAD_ERROR = (b'Too many resumable upload attempts failed '
+                            b'without progress')
+  RESUMABLE_DOWNLOAD_ERROR = (b'Too many resumable download attempts failed '
+                              b'without progress')
+
+  # TODO: Below is a list of known flaky errors that we should
+  # retry. The list needs to be extended.
+  RESUMABLE_ERROR_MESSAGE = (
+      RESUMABLE_DOWNLOAD_ERROR,
+      RESUMABLE_UPLOAD_ERROR,
+      b'ResumableUploadException',
+      b'ResumableUploadAbortException',
+      b'ResumableDownloadException',
+      b'ssl.SSLError: The read operation timed out',
+      # TODO: Error messages may change in different library versions,
+      # use regexes to match resumable error messages.
+      b"ssl.SSLError: ('The read operation timed out',)",
+      b'ssl.SSLError: _ssl.c:495: The handshake operation timed out',
+      b'Unable to find the server',
+      b"doesn't match cloud-supplied digest",
+      b'ssl.SSLError: [Errno 8]',
+      b'EOF occurred in violation of protocol',
+      # TODO(nxia): crbug.com/775330 narrow down the criteria for retrying
+      b'AccessDeniedException',
+  )
+
+  # We have seen flaky errors with 5xx return codes
+  # See b/17376491 for the "JSON decoding" error.
+  # We have seen transient Oauth 2.0 credential errors (crbug.com/414345).
+  TRANSIENT_ERROR_MESSAGE = (
+      b'ServiceException: 5',
+      b'Failure: No JSON object could be decoded',
+      b'Oauth 2.0 User Account',
+      b'InvalidAccessKeyId',
+      b'socket.error: [Errno 104] Connection reset by peer',
+      b'Received bad request from server',
+      b"can't start new thread",
+  )
+
+  @classmethod
+  def GetDefaultGSUtilBin(cls, cache_dir=None, cache_user=None):
+    if cls.DEFAULT_GSUTIL_BIN is None:
+      if cache_dir is None:
+        cache_dir = path_util.GetCacheDir()
+      if cache_dir is not None:
+        common_path = os.path.join(cache_dir, constants.COMMON_CACHE)
+        tar_cache = cache.TarballCache(common_path, cache_user=cache_user)
+        key = (cls.GSUTIL_TAR,)
+        # The common cache will not be LRU, removing the need to hold a read
+        # lock on the cached gsutil.
+        ref = tar_cache.Lookup(key)
+        ref.SetDefault(cls.GSUTIL_URL)
+        cls.DEFAULT_GSUTIL_BIN = os.path.join(ref.path, 'gsutil', 'gsutil')
+        cls._CompileCrcmod(ref.path)
+      else:
+        # Check if the default gsutil path for builders exists. If
+        # not, try locating gsutil. If none exists, simply use 'gsutil'.
+        gsutil_bin = cls.DEFAULT_GSUTIL_BUILDER_BIN
+        if not os.path.exists(gsutil_bin):
+          gsutil_bin = osutils.Which('gsutil')
+        if gsutil_bin is None:
+          gsutil_bin = 'gsutil'
+        cls.DEFAULT_GSUTIL_BIN = gsutil_bin
+
+    return cls.DEFAULT_GSUTIL_BIN
+
+  @classmethod
+  def _CompileCrcmod(cls, path):
+    """Try to setup a compiled crcmod for gsutil.
+
+    The native crcmod code is much faster than the python implementation, and
+    enables some more features (otherwise gsutil internally disables them).
+    Try to compile the module on demand in the crcmod tree bundled with gsutil.
+
+    For more details, see:
+    https://cloud.google.com/storage/docs/gsutil/addlhelp/CRC32CandInstallingcrcmod
+    """
+    src_root = os.path.join(path, 'gsutil', 'third_party', 'crcmod')
+
+    # Try to build it once.
+    flag = os.path.join(src_root, '.chromite.tried.build')
+    if os.path.exists(flag):
+      return
+    # Flag things now regardless of how the attempt below works out.
+    try:
+      osutils.Touch(flag)
+    except IOError as e:
+      # If the gsutil dir was cached previously as root, but now we're
+      # non-root, just flag it and return.
+      if e.errno == errno.EACCES:
+        logging.debug('Skipping gsutil crcmod compile due to permissions')
+        cros_build_lib.sudo_run(['touch', flag], debug_level=logging.DEBUG)
+        return
+      else:
+        raise
+
+    # See if the system includes one in which case we're done.
+    # We probe `python` as that's what gsutil uses for its shebang.
+    result = cros_build_lib.run(
+        ['python', '-c', 'from crcmod.crcmod import _usingExtension; '
+         'exit(0 if _usingExtension else 1)'], check=False, capture_output=True)
+    if result.returncode == 0:
+      return
+
+    # See if the local copy has one.
+    for pyver in ('python2', 'python3'):
+      logging.debug('Attempting to compile local crcmod for %s gsutil', pyver)
+      with osutils.TempDir(prefix='chromite.gsutil.crcmod') as tempdir:
+        result = cros_build_lib.run(
+            [pyver, 'setup.py', 'build', '--build-base', tempdir,
+             '--build-platlib', tempdir],
+            cwd=src_root, capture_output=True, check=False,
+            debug_level=logging.DEBUG)
+        if result.returncode:
+          continue
+
+        # Locate the module in the build dir.
+        copied = False
+        for mod_path in glob.glob(
+            os.path.join(tempdir, 'crcmod', '_crcfunext*.so')):
+          dst_mod_path = os.path.join(src_root, pyver, 'crcmod',
+                                      os.path.basename(mod_path))
+          try:
+            shutil.copy2(mod_path, dst_mod_path)
+            copied = True
+          except shutil.Error:
+            pass
+
+        if not copied:
+          # If the module compile failed (missing compiler/headers/whatever),
+          # then the setup.py build command above would have passed, but there
+          # won't actually be a _crcfunext.so module.  Check for it here to
+          # disambiguate other errors from shutil.copy2.
+          logging.debug('No crcmod module produced (missing host compiler?)')
+          continue
+
+  def __init__(self, boto_file=None, cache_dir=None, acl=None,
+               dry_run=False, gsutil_bin=None, init_boto=False, retries=None,
+               sleep=None, cache_user=None):
+    """Constructor.
+
+    Args:
+      boto_file: Fully qualified path to user's .boto credential file.
+      cache_dir: The absolute path to the cache directory. Use the default
+        fallback if not given.
+      acl: If given, a canned ACL. It is not valid to pass in an ACL file
+        here, because most gsutil commands do not accept ACL files. If you
+        would like to use an ACL file, use the SetACL command instead.
+      dry_run: Testing mode that prints commands that would be run.
+      gsutil_bin: If given, the absolute path to the gsutil binary.  Else
+        the default fallback will be used.
+      init_boto: If set to True, GSContext will check during __init__ if a
+        valid boto config is configured, and if not, will attempt to ask the
+        user to interactively set up the boto config.
+      retries: Number of times to retry a command before failing.
+      sleep: Amount of time to sleep between failures.
+      cache_user: user for creating cache_dir for gsutil. Default is None.
+    """
+    if gsutil_bin is None:
+      gsutil_bin = self.GetDefaultGSUtilBin(cache_dir, cache_user=cache_user)
+    else:
+      self._CheckFile('gsutil not found', gsutil_bin)
+    self.gsutil_bin = gsutil_bin
+
+    # The version of gsutil is retrieved on demand and cached here.
+    self._gsutil_version = None
+
+    # Increase the number of retries. With 10 retries, Boto will try a total of
+    # 11 times and wait up to 2**11 seconds (~30 minutes) in total, not
+    # including the time spent actually uploading or downloading.
+    self.gsutil_flags = ['-o', 'Boto:num_retries=10']
+
+    # Set HTTP proxy if environment variable http_proxy is set
+    # (crbug.com/325032).
+    if 'http_proxy' in os.environ:
+      url = urllib.parse.urlparse(os.environ['http_proxy'])
+      if not url.hostname or (not url.username and url.password):
+        logging.warning('GS_ERROR: Ignoring env variable http_proxy because it '
+                        'is not properly set: %s', os.environ['http_proxy'])
+      else:
+        self.gsutil_flags += ['-o', 'Boto:proxy=%s' % url.hostname]
+        if url.username:
+          self.gsutil_flags += ['-o', 'Boto:proxy_user=%s' % url.username]
+        if url.password:
+          self.gsutil_flags += ['-o', 'Boto:proxy_pass=%s' % url.password]
+        if url.port:
+          self.gsutil_flags += ['-o', 'Boto:proxy_port=%d' % url.port]
+
+    # Prefer boto_file if specified, else prefer the env then the default.
+    if boto_file is None:
+      boto_file = os.environ.get('BOTO_CONFIG')
+    if boto_file is None and os.path.isfile(self.DEFAULT_BOTO_FILE):
+      # Only set boto file to DEFAULT_BOTO_FILE if it exists.
+      boto_file = self.DEFAULT_BOTO_FILE
+
+    self.boto_file = boto_file
+
+    self.acl = acl
+
+    self.dry_run = dry_run
+    self.retries = self.DEFAULT_RETRIES if retries is None else int(retries)
+    self._sleep_time = self.DEFAULT_SLEEP_TIME if sleep is None else int(sleep)
+
+    if init_boto and not dry_run:
+      # We can't really expect gsutil to even be present in dry_run mode.
+      self._InitBoto()
+
+  @property
+  def gsutil_version(self):
+    """Return the version of the gsutil in this context."""
+    if not self._gsutil_version:
+      if self.dry_run:
+        self._gsutil_version = self.GSUTIL_VERSION
+      else:
+        cmd = ['-q', 'version']
+
+        # gsutil has been known to return version to stderr in the past, so
+        # use stderr=subprocess.STDOUT.
+        result = self.DoCommand(cmd, stdout=True, stderr=subprocess.STDOUT)
+
+        # Expect output like: 'gsutil version 3.35' or 'gsutil version: 4.5'.
+        match = re.search(r'^\s*gsutil\s+version:?\s+([\d.]+)', result.output,
+                          re.IGNORECASE)
+        if match:
+          self._gsutil_version = match.group(1)
+        else:
+          raise GSContextException('Unexpected output format from "%s":\n%s.' %
+                                   (result.cmdstr, result.output))
+
+    return self._gsutil_version
+
+  def _CheckFile(self, errmsg, afile):
+    """Pre-flight check for valid inputs.
+
+    Args:
+      errmsg: Error message to display.
+      afile: Fully qualified path to test file existence.
+    """
+    if not os.path.isfile(afile):
+      raise GSContextException('%s, %s is not a file' % (errmsg, afile))
+
+  def _TestGSLs(self):
+    """Quick test of gsutil functionality."""
+    # The bucket in question is readable by any authenticated account.
+    # If we can list its contents, we have valid authentication.
+    cmd = ['ls', AUTHENTICATION_BUCKET]
+    result = self.DoCommand(cmd, retries=0, debug_level=logging.DEBUG,
+                            stderr=True, check=False)
+
+    # Did we fail with an authentication error?
+    if (result.returncode == 1 and
+        any(e in result.error for e in self.AUTHORIZATION_ERRORS)):
+      logging.warning('gsutil authentication failure msg: %s', result.error)
+      return False
+
+    return True
+
+  def _ConfigureBotoConfig(self):
+    """Make sure we can access protected bits in GS."""
+    print('Configuring gsutil. **Please use your @google.com account.**')
+    try:
+      if not self.boto_file:
+        self.boto_file = self.DEFAULT_BOTO_FILE
+      self.DoCommand(['config'], retries=0, debug_level=logging.CRITICAL,
+                     print_cmd=False)
+    finally:
+      if (os.path.exists(self.boto_file) and not
+          os.path.getsize(self.boto_file)):
+        os.remove(self.boto_file)
+        raise GSContextException('GS config could not be set up.')
+
+  def _InitBoto(self):
+    if not self._TestGSLs():
+      self._ConfigureBotoConfig()
+
+  def Cat(self, path, **kwargs):
+    """Returns the contents of a GS object."""
+    kwargs.setdefault('stdout', True)
+    encoding = kwargs.setdefault('encoding', None)
+    errors = kwargs.setdefault('errors', None)
+    if not PathIsGs(path):
+      # gsutil doesn't support cat-ting a local path, so read it ourselves.
+      mode = 'rb' if encoding is None else 'r'
+      try:
+        return osutils.ReadFile(path, mode=mode, encoding=encoding,
+                                errors=errors)
+      except Exception as e:
+        if getattr(e, 'errno', None) == errno.ENOENT:
+          raise GSNoSuchKey('Cat Error: file %s does not exist' % path)
+        else:
+          raise GSContextException(str(e))
+    elif self.dry_run:
+      return b'' if encoding is None else ''
+    else:
+      return self.DoCommand(['cat', path], **kwargs).output
+
+  def StreamingCat(self, path, chunksize=0x100000):
+    """Returns the content of a GS file as a stream.
+
+    Unlike Cat or Copy, this function doesn't support any internal retry or
+    validation by computing checksum of downloaded data. Users should perform
+    their own validation, or use Cat() instead.
+
+    Args:
+      path: Full gs:// path of the src file.
+      chunksize: At most how much data to read from upstream and yield to callers
+        at a time. The default value is 1 MB.
+
+    Yields:
+      The file content, chunk by chunk, as bytes.
+    """
+    assert PathIsGs(path)
+
+    if self.dry_run:
+      return (lambda: (yield ''))()
+
+    cmd = [self.gsutil_bin] + self.gsutil_flags + ['cat', path]
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+
+    def read_content():
+      try:
+        while True:
+          data = proc.stdout.read(chunksize)
+          if not data and proc.poll() is not None:
+            break
+          if data:
+            yield data
+
+        rc = proc.poll()
+        if rc:
+          raise GSCommandError(
+              'Cannot stream cat %s from Google Storage!' % path, rc, None)
+      finally:
+        if proc.returncode is None:
+          proc.stdout.close()
+          proc.terminate()
+
+    return read_content()
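+  # Illustrative usage only (a sketch; ctx is an assumed GSContext instance,
+  # and the gs:// path and local file are placeholders):
+  #   with open('/tmp/big-file', 'wb') as out:
+  #     for chunk in ctx.StreamingCat('gs://some-bucket/big-file'):
+  #       out.write(chunk)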
+
+  def CopyInto(self, local_path, remote_dir, filename=None, **kwargs):
+    """Upload a local file into a directory in google storage.
+
+    Args:
+      local_path: Local file path to copy.
+      remote_dir: Full gs:// url of the directory to transfer the file into.
+      filename: If given, the filename to place the content at; if not given,
+        it's discerned from basename(local_path).
+      **kwargs: See Copy() for documentation.
+
+    Returns:
+      The generation of the remote file.
+    """
+    filename = filename if filename is not None else local_path
+    # Basename it even if an explicit filename was given; we don't want
+    # people using filename as a multi-directory path fragment.
+    return self.Copy(local_path,
+                     '%s/%s' % (remote_dir, os.path.basename(filename)),
+                     **kwargs)
+
+  @staticmethod
+  def GetTrackerFilenames(dest_path):
+    """Returns a list of gsutil tracker filenames.
+
+    Tracker files are used by gsutil to resume downloads/uploads. This
+    function does not handle parallel uploads.
+
+    Args:
+      dest_path: Either a GS path or an absolute local path.
+
+    Returns:
+      The list of potential tracker filenames.
+    """
+    dest = urllib.parse.urlsplit(dest_path)
+    filenames = []
+    if dest.scheme == 'gs':
+      prefix = 'upload'
+      bucket_name = dest.netloc
+      object_name = dest.path.lstrip('/')
+      filenames.append(
+          re.sub(r'[/\\]', '_', 'resumable_upload__%s__%s__%s.url' %
+                 (bucket_name, object_name, GSContext.GSUTIL_API_SELECTOR)))
+    else:
+      prefix = 'download'
+      filenames.append(
+          re.sub(r'[/\\]', '_', 'resumable_download__%s__%s.etag' %
+                 (dest.path, GSContext.GSUTIL_API_SELECTOR)))
+
+    hashed_filenames = []
+    for filename in filenames:
+      m = hashlib.sha1(filename.encode())
+      hashed_filenames.append('%s_TRACKER_%s.%s' %
+                              (prefix, m.hexdigest(), filename[-16:]))
+
+    return hashed_filenames
+
+  def _RetryFilter(self, e):
+    """Returns whether to retry RunCommandError exception |e|.
+
+    Args:
+      e: Exception object to filter. Exception may be re-raised as
+         a different type, if _RetryFilter determines a more appropriate
+         exception type based on the contents of |e|.
+    """
+    error_details = self._MatchKnownError(e)
+    if error_details.exception:
+      raise error_details.exception
+    return error_details.retriable
+
+  def _MatchKnownError(self, e):
+    """Function to match known RunCommandError exceptions.
+
+    Args:
+      e: Exception object to filter.
+
+    Returns:
+      An ErrorDetails instance with details about the message pattern found.
+    """
+    if not retry_util.ShouldRetryCommandCommon(e):
+      if not isinstance(e, cros_build_lib.RunCommandError):
+        error_type = 'unknown'
+      else:
+        error_type = 'failed_to_launch'
+      return ErrorDetails(type=error_type, retriable=False)
+
+    # e is guaranteed by above filter to be a RunCommandError
+    if e.result.returncode < 0:
+      sig_name = signals.StrSignal(-e.result.returncode)
+      logging.info('Child process received signal %s; not retrying.', sig_name)
+      return ErrorDetails(type='received_signal', message_pattern=sig_name,
+                          retriable=False)
+
+    error = e.result.error
+    if error:
+      # Since the captured error will use the encoding the user requested,
+      # normalize to bytes for testing below.
+      if isinstance(error, six.text_type):
+        error = error.encode('utf-8')
+
+      # gsutil usually prints PreconditionException when a precondition fails.
+      # It may also print "ResumableUploadAbortException: 412 Precondition
+      # Failed", so the logic needs to be a little more general.
+      if (b'PreconditionException' in error or
+          b'412 Precondition Failed' in error):
+        return ErrorDetails(type='precondition_exception', retriable=False,
+                            exception=GSContextPreconditionFailed(e))
+
+      # If the file does not exist, one of the following errors occurs. The
+      # "stat" command leaves off the "CommandException: " prefix, but it also
+      # outputs to stdout instead of stderr and so will not be caught here
+      # regardless.
+      if (b'CommandException: No URLs matched' in error or
+          b'NotFoundException:' in error or
+          b'One or more URLs matched no objects' in error):
+        return ErrorDetails(type='no_such_key', retriable=False,
+                            exception=GSNoSuchKey(e))
+
+      logging.warning('GS_ERROR: %s ', error)
+
+      # Temporary fix: remove the gsutil tracker files so that our retry
+      # can hit a different backend. This should be removed after the
+      # bug is fixed by the Google Storage team (see crbug.com/308300).
+      resumable_error = _FirstSubstring(error, self.RESUMABLE_ERROR_MESSAGE)
+      if resumable_error:
+        # Only remove the tracker files if we try to upload/download a file.
+        if 'cp' in e.result.cmd[:-2]:
+          # Assume a command: gsutil [options] cp [options] src_path dest_path
+          # dest_path needs to be a fully qualified local path, which is already
+          # required for GSContext.Copy().
+          tracker_filenames = self.GetTrackerFilenames(e.result.cmd[-1])
+          logging.info('Potential list of tracker files: %s',
+                       tracker_filenames)
+          for tracker_filename in tracker_filenames:
+            tracker_file_path = os.path.join(self.DEFAULT_GSUTIL_TRACKER_DIR,
+                                             tracker_filename)
+            if os.path.exists(tracker_file_path):
+              logging.info('Deleting gsutil tracker file %s before retrying.',
+                           tracker_file_path)
+              logging.info('The content of the tracker file: %s',
+                           osutils.ReadFile(tracker_file_path))
+              osutils.SafeUnlink(tracker_file_path)
+        return ErrorDetails(type='resumable',
+                            message_pattern=resumable_error.decode('utf-8'),
+                            retriable=True)
+
+      transient_error = _FirstSubstring(error, self.TRANSIENT_ERROR_MESSAGE)
+      if transient_error:
+        return ErrorDetails(type='transient',
+                            message_pattern=transient_error.decode('utf-8'),
+                            retriable=True)
+
+    return ErrorDetails(type='unknown', retriable=False)
+
+  # TODO(mtennant): Make a private method.
+  def DoCommand(self, gsutil_cmd, headers=(), retries=None, version=None,
+                parallel=False, **kwargs):
+    """Run a gsutil command, suppressing output, and setting retry/sleep.
+
+    Args:
+      gsutil_cmd: The (mostly) constructed gsutil subcommand to run.
+      headers: A list of raw headers to pass down.
+      parallel: Whether gsutil should enable parallel copy/update of multiple
+        files. NOTE: This option causes gsutil to use significantly more
+        memory, even if gsutil is only uploading one file.
+      retries: How many times to retry this command (defaults to setting given
+        at object creation).
+      version: If given, the generation; essentially the timestamp of the last
+        update.  Note this is not the same as sequence-number; it's
+        monotonically increasing bucket wide rather than reset per file.
+        The usage of this is if we intend to replace/update only if the version
+        is what we expect.  This is useful for distributed reasons- for example,
+        to ensure you don't overwrite someone else's creation, a version of
+        0 states "only update if no version exists".
+
+    Returns:
+      A RunCommandResult object.
+    """
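+    # Illustrative note (not part of the original logic): passing version=0
+    # makes the write conditional on the object not existing yet; GSCounter
+    # above relies on exactly this, e.g.
+    #   self.ctx.Copy('-', self.path, input=str(value), version=generation)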
+    kwargs = kwargs.copy()
+    kwargs.setdefault('stderr', True)
+    kwargs.setdefault('encoding', 'utf-8')
+
+    cmd = [self.gsutil_bin]
+    cmd += self.gsutil_flags
+    for header in headers:
+      cmd += ['-h', header]
+    if version is not None:
+      cmd += ['-h', 'x-goog-if-generation-match:%d' % int(version)]
+
+    # Enable parallel copy/update of multiple files if stdin is not to
+    # be piped to the command. This does not split a single file into
+    # smaller components for upload.
+    if parallel and kwargs.get('input') is None:
+      cmd += ['-m']
+
+    cmd.extend(gsutil_cmd)
+
+    if retries is None:
+      retries = self.retries
+
+    extra_env = kwargs.pop('extra_env', {})
+    if self.boto_file and os.path.isfile(self.boto_file):
+      extra_env.setdefault('BOTO_CONFIG', self.boto_file)
+
+    if self.dry_run:
+      logging.debug("%s: would've run: %s", self.__class__.__name__,
+                    cros_build_lib.CmdToStr(cmd))
+    else:
+      try:
+        return retry_stats.RetryWithStats(retry_stats.GSUTIL,
+                                          self._RetryFilter,
+                                          retries, cros_build_lib.run,
+                                          cmd, sleep=self._sleep_time,
+                                          extra_env=extra_env, **kwargs)
+      except cros_build_lib.RunCommandError as e:
+        raise GSCommandError(e.msg, e.result, e.exception)
+
+  def Copy(self, src_path, dest_path, acl=None, recursive=False,
+           skip_symlinks=True, auto_compress=False, **kwargs):
+    """Copy to/from GS bucket.
+
+    Canned ACL permissions can be specified on the gsutil cp command line.
+
+    More info:
+    https://developers.google.com/storage/docs/accesscontrol#applyacls
+
+    Args:
+      src_path: Fully qualified local path or full gs:// path of the src file.
+      dest_path: Fully qualified local path or full gs:// path of the dest
+                 file.
+      acl: One of the google storage canned_acls to apply.
+      recursive: Whether to copy recursively.
+      skip_symlinks: Skip symbolic links when copying recursively.
+      auto_compress: Automatically compress with gzip when uploading.
+
+    Returns:
+      The generation of the remote file.
+
+    Raises:
+      RunCommandError if the command failed despite retries.
+    """
+    # -v causes gs://bucket/path#generation to be listed in output.
+    cmd = ['cp', '-v']
+
+    # Certain versions of gsutil (at least 4.3) assume the source of a copy is
+    # a directory if the -r option is used. If it's really a file, gsutil will
+    # look like it's uploading it but not actually do anything. We'll work
+    # around that problem by suppressing the -r flag if we detect the source
+    # is a local file.
+    if recursive and not os.path.isfile(src_path):
+      cmd.append('-r')
+      if skip_symlinks:
+        cmd.append('-e')
+
+    if auto_compress:
+      cmd.append('-Z')
+
+    acl = self.acl if acl is None else acl
+    if acl is not None:
+      cmd += ['-a', acl]
+
+    with cros_build_lib.ContextManagerStack() as stack:
+      # Write the input into a tempfile if possible. This is needed so that
+      # gsutil can retry failed requests.  We allow the input to be a string
+      # or bytes regardless of the output encoding.
+      if src_path == '-' and kwargs.get('input') is not None:
+        f = stack.Add(tempfile.NamedTemporaryFile, mode='wb')
+        data = kwargs['input']
+        if isinstance(data, six.text_type):
+          data = data.encode('utf-8')
+        f.write(data)
+        f.flush()
+        del kwargs['input']
+        src_path = f.name
+
+      cmd += ['--', src_path, dest_path]
+
+      if not (PathIsGs(src_path) or PathIsGs(dest_path)):
+        # Don't retry on local copies.
+        kwargs.setdefault('retries', 0)
+
+      kwargs['capture_output'] = True
+      try:
+        result = self.DoCommand(cmd, **kwargs)
+        if self.dry_run:
+          return None
+
+        # Now we parse the output for the current generation number.  Example:
+        #   Created: gs://chromeos-throw-away-bucket/foo#1360630664537000.1
+        m = re.search(r'Created: .*#(\d+)([.](\d+))?\n', result.error)
+        if m:
+          return int(m.group(1))
+        else:
+          return None
+      except GSNoSuchKey as e:
+        # If the source was a local file, the error is a quirk of gsutil 4.5
+        # and should be ignored. If the source was remote, there might
+        # legitimately be no such file. See crbug.com/393419.
+        if os.path.isfile(src_path):
+          return None
+
+        # Temp log for crbug.com/642986, should be removed when the bug
+        # is fixed.
+        logging.warning('Copy Error: src %s dest %s: %s '
+                        '(Temp log for crbug.com/642986)',
+                        src_path, dest_path, e)
+        raise
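+  # Illustrative usage only (a sketch; ctx is an assumed GSContext instance,
+  # and the paths and canned ACL are placeholders):
+  #   ctx.Copy('/tmp/local-file', 'gs://some-bucket/remote-file',
+  #            acl='public-read')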
+
+  def CreateWithContents(self, gs_uri, contents, **kwargs):
+    """Creates the specified file with specified contents.
+
+    Args:
+      gs_uri: The URI of a file on Google Storage.
+      contents: String or bytes with contents to write to the file.
+      kwargs: See additional options that Copy takes.
+
+    Raises:
+      See Copy.
+    """
+    self.Copy('-', gs_uri, input=contents, **kwargs)
+
+  # TODO: Merge LS() and List()?
+  def LS(self, path, **kwargs):
+    """Does a directory listing of the given gs path.
+
+    Args:
+      path: The path to get a listing of.
+      kwargs: See options that DoCommand takes.
+
+    Returns:
+      A list of paths that matched |path|.  Might be more than one if |path|
+      is a directory or includes wildcards, etc.
+    """
+    if self.dry_run:
+      return []
+
+    if not PathIsGs(path):
+      # gsutil doesn't support listing a local path, so just run 'ls'.
+      kwargs.pop('retries', None)
+      kwargs.pop('headers', None)
+      kwargs['capture_output'] = True
+      kwargs.setdefault('encoding', 'utf-8')
+      result = cros_build_lib.run(['ls', path], **kwargs)
+      return result.output.splitlines()
+    else:
+      return [x.url for x in self.List(path, **kwargs)]
+
+  def List(self, path, details=False, **kwargs):
+    """Does a directory listing of the given gs path.
+
+    Args:
+      path: The path to get a listing of.
+      details: Whether to include size/timestamp info.
+      kwargs: See options that DoCommand takes.
+
+    Returns:
+      A list of GSListResult objects that matched |path|.  Might be more
+      than one if |path| is a directory or includes wildcards, etc.
+    """
+    ret = []
+    if self.dry_run:
+      return ret
+
+    cmd = ['ls']
+    if details:
+      cmd += ['-l']
+    cmd += ['--', path]
+
+    # We always request the extended details as the overhead compared to a plain
+    # listing is negligible.
+    kwargs['stdout'] = True
+    lines = self.DoCommand(cmd, **kwargs).output.splitlines()
+
+    if details:
+      # The last line is expected to be a summary line.  Ignore it.
+      lines = lines[:-1]
+      ls_re = LS_LA_RE
+    else:
+      ls_re = LS_RE
+
+    # Handle optional fields.
+    intify = lambda x: int(x) if x else None
+
+    # Parse out each result and build up the results list.
+    for line in lines:
+      match = ls_re.search(line)
+      if not match:
+        raise GSContextException('unable to parse line: %s' % line)
+      if match.group('creation_time'):
+        timestamp = datetime.datetime.strptime(match.group('creation_time'),
+                                               DATETIME_FORMAT)
+      else:
+        timestamp = None
+
+      ret.append(GSListResult(
+          content_length=intify(match.group('content_length')),
+          creation_time=timestamp,
+          url=match.group('url'),
+          generation=intify(match.group('generation')),
+          metageneration=intify(match.group('metageneration'))))
+
+    return ret
+
+  def GetSize(self, path, **kwargs):
+    """Returns size of a single object (local or GS)."""
+    if not PathIsGs(path):
+      return os.path.getsize(path)
+    else:
+      return self.Stat(path, **kwargs).content_length
+
+  def Move(self, src_path, dest_path, **kwargs):
+    """Move/rename to/from GS bucket.
+
+    Args:
+      src_path: Fully qualified local path or full gs:// path of the src file.
+      dest_path: Fully qualified local path or full gs:// path of the dest file.
+      kwargs: See options that DoCommand takes.
+    """
+    cmd = ['mv', '--', src_path, dest_path]
+    return self.DoCommand(cmd, **kwargs)
+
+  def SetACL(self, upload_url, acl=None, **kwargs):
+    """Set access on a file already in google storage.
+
+    Args:
+      upload_url: gs:// url that will have acl applied to it.
+      acl: An ACL permissions file or canned ACL.
+      kwargs: See options that DoCommand takes.
+    """
+    if acl is None:
+      if not self.acl:
+        raise GSContextException(
+            'SetACL invoked without a specified acl or a default acl.')
+      acl = self.acl
+
+    self.DoCommand(['acl', 'set', acl, upload_url], **kwargs)
+
+  def ChangeACL(self, upload_url, acl_args_file=None, acl_args=None, **kwargs):
+    """Change access on a file already in google storage with "acl ch".
+
+    Args:
+      upload_url: gs:// url that will have acl applied to it.
+      acl_args_file: A file with arguments to the gsutil acl ch command. The
+                     arguments can be spread across multiple lines. Comments
+                     start with a # character and extend to the end of the
+                     line. Exactly one of this argument or acl_args must be
+                     set.
+      acl_args: A list of arguments for the gsutil acl ch command. Exactly
+                one of this argument or acl_args_file must be set.
+      kwargs: See options that DoCommand takes.
+    """
+    if acl_args_file and acl_args:
+      raise GSContextException(
+          'ChangeACL invoked with both acl_args_file and acl_args set.')
+    if not acl_args_file and not acl_args:
+      raise GSContextException(
+          'ChangeACL invoked with neither acl_args_file nor acl_args set.')
+
+    if acl_args_file:
+      lines = osutils.ReadFile(acl_args_file).splitlines()
+      # Strip out comments.
+      lines = [x.split('#', 1)[0].strip() for x in lines]
+      acl_args = ' '.join([x for x in lines if x]).split()
+
+    # Some versions of gsutil bubble up precondition failures even when we
+    # didn't request it due to how ACL changes happen internally to gsutil.
+    # https://crbug.com/763450
+    # We keep the retry limit a bit low because DoCommand already has its
+    # own level of retries.
+    retry_util.RetryException(
+        GSContextPreconditionFailed, 3, self.DoCommand,
+        ['acl', 'ch'] + acl_args + [upload_url], **kwargs)
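+  # Illustrative usage only (a sketch; ctx is an assumed GSContext instance,
+  # the URL is a placeholder, and the '-g AllUsers:R' arguments assume
+  # standard `gsutil acl ch` syntax):
+  #   ctx.ChangeACL('gs://some-bucket/obj', acl_args=['-g', 'AllUsers:R'])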
+
+  def Exists(self, path, **kwargs):
+    """Checks whether the given object exists.
+
+    Args:
+      path: Local path or gs:// url to check.
+      kwargs: Flags to pass to DoCommand.
+
+    Returns:
+      True if the path exists; otherwise returns False.
+    """
+    if not PathIsGs(path):
+      return os.path.exists(path)
+
+    try:
+      self.Stat(path, **kwargs)
+    except GSNoSuchKey:
+      return False
+
+    return True
+
+  def Remove(self, path, recursive=False, ignore_missing=False, **kwargs):
+    """Remove the specified file.
+
+    Args:
+      path: Full gs:// url of the file to delete.
+      recursive: Remove recursively starting at path.
+      ignore_missing: Whether to suppress errors about missing files.
+      kwargs: Flags to pass to DoCommand.
+    """
+    cmd = ['rm']
+    if 'recurse' in kwargs:
+      raise TypeError('"recurse" has been renamed to "recursive"')
+    if recursive:
+      cmd.append('-R')
+    cmd.append('--')
+    cmd.append(path)
+    try:
+      self.DoCommand(cmd, **kwargs)
+    except GSNoSuchKey:
+      if not ignore_missing:
+        raise
+
+  def GetGeneration(self, path):
+    """Get the generation and metageneration of the given |path|.
+
+    Returns:
+      A tuple of the generation and metageneration.
+    """
+    try:
+      res = self.Stat(path)
+    except GSNoSuchKey:
+      return 0, 0
+
+    return res.generation, res.metageneration
+
+  def Stat(self, path, **kwargs):
+    """Stat a GS file, and get detailed information.
+
+    Args:
+      path: A GS path for files to Stat. Wildcards are NOT supported.
+      kwargs: Flags to pass to DoCommand.
+
+    Returns:
+      A GSStatResult object with all fields populated.
+
+    Raises:
+      Assorted GSContextException exceptions.
+    """
+    try:
+      res = self.DoCommand(['stat', '--', path], stdout=True, **kwargs)
+    except GSCommandError as e:
+      # Because the 'gsutil stat' command logs errors itself (instead of
+      # raising errors internally like other commands), we have to look
+      # for errors ourselves.  See the related bug report here:
+      # https://github.com/GoogleCloudPlatform/gsutil/issues/288
+      # Example line:
+      # No URLs matched gs://bucket/file
+      if e.result.error and e.result.error.startswith('No URLs matched'):
+        raise GSNoSuchKey('Stat Error: No URLs matched %s.' % path)
+
+      # No idea what this is, so just choke.
+      raise
+
+    # In dryrun mode, DoCommand doesn't return an object, so we need to fake
+    # out the behavior ourselves.
+    if self.dry_run:
+      return GSStatResult(
+          creation_time=datetime.datetime.now(),
+          content_length=0,
+          content_type='application/octet-stream',
+          hash_crc32c='AAAAAA==',
+          hash_md5='',
+          etag='',
+          generation=0,
+          metageneration=0)
+
+    # We expect Stat output like the following. However, the Content-Language
+    # line appears to be optional based on how the file in question was
+    # created.
+    #
+    # gs://bucket/path/file:
+    #     Creation time:      Sat, 23 Aug 2014 06:53:20 GMT
+    #     Content-Language:   en
+    #     Content-Length:     74
+    #     Content-Type:       application/octet-stream
+    #     Hash (crc32c):      BBPMPA==
+    #     Hash (md5):         ms+qSYvgI9SjXn8tW/5UpQ==
+    #     ETag:               CNCgocbmqMACEAE=
+    #     Generation:         1408776800850000
+    #     Metageneration:     1
+
+    if not res.output.startswith('gs://'):
+      raise GSContextException('Unexpected stat output: %s' % res.output)
+
+    def _GetField(name, optional=False):
+      m = re.search(r'%s:\s*(.+)' % re.escape(name), res.output)
+      if m:
+        return m.group(1)
+      elif optional:
+        return None
+      else:
+        raise GSContextException('Field "%s" missing in "%s"' %
+                                 (name, res.output))
+
+    return GSStatResult(
+        creation_time=datetime.datetime.strptime(
+            _GetField('Creation time'), '%a, %d %b %Y %H:%M:%S %Z'),
+        content_length=int(_GetField('Content-Length')),
+        content_type=_GetField('Content-Type'),
+        hash_crc32c=_GetField('Hash (crc32c)'),
+        hash_md5=_GetField('Hash (md5)', optional=True),
+        etag=_GetField('ETag'),
+        generation=int(_GetField('Generation')),
+        metageneration=int(_GetField('Metageneration')))
+
+  def Counter(self, path):
+    """Return a GSCounter object pointing at a |path| in Google Storage.
+
+    Args:
+      path: The path to the counter in Google Storage.
+    """
+    return GSCounter(self, path)
+
+  def WaitForGsPaths(self, paths, timeout, period=10):
+    """Wait until a list of files exist in GS.
+
+    Args:
+      paths: The list of files to wait for.
+      timeout: Max seconds to wait for file to appear.
+      period: How often to check for files while waiting.
+
+    Raises:
+      timeout_util.TimeoutError if the timeout is reached.
+    """
+    # Copy the list of URIs to wait for, so we don't modify the caller's context.
+    pending_paths = paths[:]
+
+    def _CheckForExistence():
+      pending_paths[:] = [x for x in pending_paths if not self.Exists(x)]
+
+    def _Retry(_return_value):
+      # Retry, if there are any pending paths left.
+      return pending_paths
+
+    timeout_util.WaitForSuccess(_Retry, _CheckForExistence,
+                                timeout=timeout, period=period)
+
+  def ContainsWildcard(self, url):
+    """Checks whether url_string contains a wildcard.
+
+    Args:
+      url: URL string to check.
+
+    Returns:
+      True if |url| contains a wildcard.
+    """
+    return bool(WILDCARD_REGEX.search(url))
+
+  def GetGsNamesWithWait(self, pattern, url, timeout=600, period=10,
+                         is_regex_pattern=False):
+    """Returns the google storage names specified by the given pattern.
+
+    This method polls Google Storage until the target files specified by the
+    pattern are available or until the timeout occurs. Because we may not know
+    the exact names of the target files, the method accepts a filename pattern
+    to identify whether a file whose name matches the pattern exists
+    (e.g. use pattern '*_full_*' to search for the full payload
+    'chromeos_R17-1413.0.0-a1_x86-mario_full_dev.bin'). Returns the names only
+    if found before the timeout.
+
+    Warning: GS listings are not perfect and are only eventually consistent.
+    Searching for file existence is 'best effort'; calling code should be
+    aware of that and ready to handle it.
+
+    Args:
+      pattern: a path pattern (glob or regex) identifying the files we need.
+      url: URL of the Google Storage bucket.
+      timeout: how many seconds are we allowed to keep trying.
+      period: how many seconds to wait between attempts.
+      is_regex_pattern: Whether the pattern is a regex (otherwise a glob).
+
+    Returns:
+      The list of files matching the pattern in the Google Storage bucket, or
+      None if no matching files are found before the timeout
+      (timeout_util.TimeoutError is caught internally).
+    """
+    def _GetGsName():
+      uploaded_list = [os.path.basename(p.url) for p in self.List(url)]
+
+      if is_regex_pattern:
+        filter_re = re.compile(pattern)
+        matching_names = [f for f in uploaded_list
+                          if filter_re.search(f) is not None]
+      else:
+        matching_names = fnmatch.filter(uploaded_list, pattern)
+
+      return matching_names
+
+    try:
+      matching_names = None
+      if not (is_regex_pattern or self.ContainsWildcard(pattern)):
+        try:
+          self.WaitForGsPaths(['%s/%s' % (url, pattern)], timeout)
+          return [os.path.basename(pattern)]
+        except GSCommandError:
+          pass
+
+      if not matching_names:
+        matching_names = timeout_util.WaitForSuccess(
+            lambda x: not x, _GetGsName, timeout=timeout, period=period)
+
+      logging.debug('matching_names=%s, is_regex_pattern=%r',
+                    matching_names, is_regex_pattern)
+      return matching_names
+    except timeout_util.TimeoutError:
+      return None
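+
+  # Editor's note: an illustrative usage sketch (not part of the original
+  # module).  The bucket URL and payload pattern below are made up.
+  #
+  #   ctx = GSContext()
+  #   # Glob match: wait for any "full" payload to appear under the build URL.
+  #   names = ctx.GetGsNamesWithWait('*_full_*', 'gs://my-bucket/R90-1234.0.0')
+  #   # Regex match instead of a glob:
+  #   names = ctx.GetGsNamesWithWait(r'.*_full_.*',
+  #                                  'gs://my-bucket/R90-1234.0.0',
+  #                                  is_regex_pattern=True)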
+
+
+def _FirstMatch(predicate, elems):
+  """Returns the first element matching the given |predicate|.
+
+  Args:
+    predicate: A function which takes an element and returns a bool.
+    elems: A sequence of elements.
+  """
+  matches = [x for x in elems if predicate(x)]
+  return matches[0] if matches else None
+
+
+def _FirstSubstring(superstring, haystack):
+  """Returns the first elem of |haystack| which is a substring of |superstring|.
+
+  Args:
+    superstring: A string to search for substrings of.
+    haystack: A sequence of strings to search through.
+  """
+  return _FirstMatch(lambda s: s in superstring, haystack)
+
+
[email protected]
+def TemporaryURL(prefix):
+  """Context manager to generate a random URL.
+
+  At the end, the URL will be deleted.
+  """
+  url = '%s/chromite-temp/%s/%s/%s' % (constants.TRASH_BUCKET, prefix,
+                                       getpass.getuser(),
+                                       cros_build_lib.GetRandomString())
+  ctx = GSContext()
+  ctx.Remove(url, ignore_missing=True, recursive=True)
+  try:
+    yield url
+  finally:
+    ctx.Remove(url, ignore_missing=True, recursive=True)
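+
+# Editor's note: an illustrative sketch of TemporaryURL (not part of the
+# original module).  The prefix is arbitrary; the URL lives under
+# constants.TRASH_BUCKET and is removed again when the block exits.
+#
+#   with TemporaryURL('my-prefix') as url:
+#     pass  # ... upload scratch artifacts under |url| here ...
+#   # On exit, everything under |url| has been removed.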
diff --git a/utils/frozen_chromite/lib/locking.py b/utils/frozen_chromite/lib/locking.py
new file mode 100644
index 0000000..26e6ad4
--- /dev/null
+++ b/utils/frozen_chromite/lib/locking.py
@@ -0,0 +1,407 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Basic locking functionality."""
+
+from __future__ import print_function
+
+import contextlib
+import os
+import errno
+import fcntl
+import stat
+import tempfile
+
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
+
+
+LOCKF = 'lockf'
+FLOCK = 'flock'
+
+
+class LockNotAcquiredError(Exception):
+  """Signals that the lock was not acquired."""
+
+
+class LockingError(Exception):
+  """Signals miscellaneous problems in the locking process."""
+
+
[email protected]
+def _optional_timer_context(timeout):
+  """Use the timeout_util.Timeout contextmanager if timeout is set."""
+  if timeout:
+    with timeout_util.Timeout(timeout):
+      yield
+  else:
+    yield
+
+
+class _Lock(cros_build_lib.MasterPidContextManager):
+  """Base lockf based locking.  Derivatives need to override _GetFd"""
+
+  def __init__(self, description=None, verbose=True, locktype=LOCKF,
+               blocking=True, blocking_timeout=None):
+    """Initialize this instance.
+
+    Two types of locks are available: LOCKF and FLOCK.
+
+    Use LOCKF (POSIX locks) if:
+      - you need to lock a file between processes created by the
+        parallel/multiprocess libraries
+
+    Use FLOCK (BSD locks) if these scenarios apply:
+      - you need to lock a file between shell scripts running the flock program
+      - you need the lock to be bound to the fd and thus inheritable across
+        execs
+
+    Note: These two locks are completely independent; using one on a path will
+          not block using the other on the same path.
+
+    Args:
+      path: On disk pathway to lock.  Can be a directory or a file.
+      description: A description for this lock - what is it protecting?
+      verbose: Verbose logging?
+      locktype: Type of lock to use (lockf or flock).
+      blocking: If True, use a blocking lock.
+      blocking_timeout: If not None, time in seconds to wait on blocking calls.
+    """
+    cros_build_lib.MasterPidContextManager.__init__(self)
+    self._verbose = verbose
+    self.description = description
+    self._fd = None
+    self.locking_mechanism = fcntl.flock if locktype == FLOCK else fcntl.lockf
+    # Store (to log) the locktype string.
+    self.locktype = locktype
+    self.blocking = blocking
+    self.blocking_timeout = blocking_timeout
+
+  @property
+  def fd(self):
+    if self._fd is None:
+      self._fd = self._GetFd()
+      # Ensure that all derivatives of this lock can't bleed the fd
+      # across execs.
+      fcntl.fcntl(self._fd, fcntl.F_SETFD,
+                  fcntl.fcntl(self._fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+    return self._fd
+
+  def _GetFd(self):
+    raise NotImplementedError(self, '_GetFd')
+
+  def _enforce_lock(self, flags, message):
+    # Try nonblocking first; if it fails, display the context/message,
+    # and then wait on the lock.
+    try:
+      self.locking_mechanism(self.fd, flags|fcntl.LOCK_NB)
+      return
+    except EnvironmentError as e:
+      if e.errno == errno.EDEADLK:
+        self.unlock()
+      elif e.errno != errno.EAGAIN:
+        raise
+    if self.description:
+      message = '%s: blocking (LOCK_NB) (%s) while %s' % (self.description,
+                                                          self.locktype,
+                                                          message)
+    if not self.blocking:
+      self.close()
+      raise LockNotAcquiredError(message)
+    if self._verbose:
+      logging.info(message)
+
+    try:
+      with _optional_timer_context(self.blocking_timeout):
+        self.locking_mechanism(self.fd, flags)
+    except timeout_util.TimeoutError:
+      description = self.description or 'locking._enforce_lock'
+      logging.error(
+          'Timed out after waiting %d seconds for blocking lock (%s): %s',
+          self.blocking_timeout, self.locktype, description)
+      raise
+    except EnvironmentError as e:
+      if e.errno != errno.EDEADLK:
+        message = ('%s: blocking wait failed errno %s'
+                   % (self.description, e))
+        raise
+      self.unlock()
+      self.locking_mechanism(self.fd, flags)
+    logging.debug('%s: lock has been acquired (%s), continuing.',
+                  self.description, self.locktype)
+
+  def lock(self, shared=False):
+    """Take a lock of type |shared|.
+
+    Any existing lock will be updated if need be.
+
+    Args:
+      shared: If True make the lock shared.
+
+    Returns:
+      self, allowing it to be used as a `with` target.
+
+    Raises:
+      IOError if the operation fails in some way.
+      LockNotAcquiredError if the lock couldn't be acquired (non-blocking
+        mode only).
+    """
+    self._enforce_lock(
+        fcntl.LOCK_SH if shared else fcntl.LOCK_EX,
+        'taking a %s lock' % ('shared' if shared else 'exclusive'))
+    return self
+
+  def read_lock(self, message='taking read lock'):
+    """Take a read lock (shared), downgrading from write if required.
+
+    Args:
+      message: A description of what/why this lock is being taken.
+
+    Returns:
+      self, allowing it to be used as a `with` target.
+
+    Raises:
+      IOError if the operation fails in some way.
+    """
+    self._enforce_lock(fcntl.LOCK_SH, message)
+    return self
+
+  def write_lock(self, message='taking write lock'):
+    """Take a write lock (exclusive), upgrading from read if required.
+
+    Note that if the lock state is being upgraded from read to write,
+    a deadlock potential exists - as such we *will* release the lock
+    to work around it.  Any consuming code should not assume that
+    transitioning from shared to exclusive means no one else has
+    gotten at the critical resource in between for this reason.
+
+    Args:
+      message: A description of what/why this lock is being taken.
+
+    Returns:
+      self, allowing it to be used as a `with` target.
+
+    Raises:
+      IOError if the operation fails in some way.
+    """
+    self._enforce_lock(fcntl.LOCK_EX, message)
+    return self
+
+  def unlock(self):
+    """Release any locks held.  Noop if no locks are held.
+
+    Raises:
+      IOError if the operation fails in some way.
+    """
+    if self._fd is not None:
+      logging.debug('%s: lock is being released (%s).',
+                    self.description, self.locktype)
+      self.locking_mechanism(self._fd, fcntl.LOCK_UN)
+
+  def __del__(self):
+    # TODO(ferringb): Convert this to snakeoil.weakref.WeakRefFinalizer
+    # if/when that rebasing occurs.
+    self.close()
+
+  def close(self):
+    """Release the underlying lock and close the fd."""
+    if self._fd is not None:
+      self.unlock()
+      os.close(self._fd)
+      self._fd = None
+
+  def _enter(self):
+    # Force the fd to be opened via touching the property.
+    # We do this to ensure that even if entering a context w/out a lock
+    # held, we can do locking in that critical section if the code requests it.
+    # pylint: disable=pointless-statement
+    self.fd
+    return self
+
+  def _exit(self, exc_type, exc, exc_tb):
+    try:
+      self.unlock()
+    finally:
+      self.close()
+
+  def IsLocked(self):
+    """Return True if the lock is grabbed."""
+    return bool(self._fd)
+
+
+class FileLock(_Lock):
+  """Use a specified file as a locking mechanism."""
+
+  def __init__(self, path, description=None, verbose=True,
+               locktype=LOCKF, world_writable=False, blocking=True,
+               blocking_timeout=None):
+    """Initializer for FileLock.
+
+    Args:
+      path: On disk pathway to lock.  Can be a directory or a file.
+      description: A description for this lock - what is it protecting?
+      verbose: Verbose logging?
+      locktype: Type of lock to use (lockf or flock).
+      world_writable: If True, the lock file will be created as root and be made
+        writable to all users.
+      blocking: If True, use a blocking lock.
+      blocking_timeout: If not None, time in seconds to wait on blocking calls.
+    """
+    if description is None:
+      description = 'lock %s' % (path,)
+    _Lock.__init__(self, description=description, verbose=verbose,
+                   locktype=locktype, blocking=blocking,
+                   blocking_timeout=blocking_timeout)
+    self.path = os.path.abspath(path)
+    self.world_writable = world_writable
+
+  def _GetFd(self):
+    if self.world_writable:
+      create = True
+      try:
+        create = stat.S_IMODE(os.stat(self.path).st_mode) != 0o666
+      except OSError as e:
+        if e.errno != errno.ENOENT:
+          raise
+      if create:
+        osutils.SafeMakedirs(os.path.dirname(self.path), sudo=True)
+        cros_build_lib.sudo_run(['touch', self.path], print_cmd=False)
+        cros_build_lib.sudo_run(['chmod', '666', self.path], print_cmd=False)
+
+    # If we're on py3.4 and this attribute is exposed, use it to close
+    # the threading race between open and fcntl setting; this is
+    # extremely paranoid code, but might as well.
+    cloexec = getattr(os, 'O_CLOEXEC', 0)
+    # There exist race conditions where the lock may be created by
+    # root, thus denying subsequent accesses from others. To prevent
+    # this, we create the lock with mode 0o666.
+    try:
+      value = os.umask(000)
+      fd = os.open(self.path, os.W_OK|os.O_CREAT|cloexec, 0o666)
+    finally:
+      os.umask(value)
+    return fd
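+
+# Editor's note: an illustrative usage sketch (not part of the original
+# module).  The lock path is arbitrary; lock() returns self, so it can be used
+# directly as a `with` target per the docstrings above.
+#
+#   with FileLock('/tmp/example.lock').lock() as lock:
+#     do_critical_work()   # hypothetical; exclusive lock held here
+#     lock.read_lock()     # may downgrade to a shared lock in place
+#   # Exiting the block unlocks and closes the underlying fd.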
+
+
+class ProcessLock(_Lock):
+  """Process level locking visible to parent/child only.
+
+  This lock is basically a more robust version of what
+  multiprocessing.Lock does.  That implementation uses semaphores
+  internally which require cleanup/deallocation code to run to release
+  the lock; a SIGKILL hitting the process holding the lock violates those
+  assumptions leading to a stuck lock.
+
+  Thus this implementation is based around locking of a deleted tempfile;
+  lockf locks are guaranteed to be released once the process/fd is closed.
+  """
+
+  def _GetFd(self):
+    with tempfile.TemporaryFile() as f:
+      # We don't want to hold onto the object indefinitely; we just want
+      # the fd to a temporary inode, preferably one that isn't vfs accessible.
+      # Since TemporaryFile closes the fd once the object is GC'd, we just
+      # dupe the fd so we retain a copy, while the original TemporaryFile
+      # goes away.
+      return os.dup(f.fileno())
+
+
+class PortableLinkLock(object):
+  """A more primitive lock that relies on the atomicity of creating hardlinks.
+
+  Use this lock if you need to be compatible with shadow utils like groupadd
+  or useradd.
+  """
+
+  def __init__(self, path, max_retry=0, sleep=1):
+    """Construct an instance.
+
+    Args:
+      path: path to file to lock on.  Multiple processes attempting to lock the
+        same path will compete for a system-wide lock.
+      max_retry: maximum number of times to attempt to acquire the lock.
+      sleep: See retry_util.GenericRetry's sleep parameter.
+    """
+    self._path = path
+    self._target_path = None
+    # These two poorly named variables are just passed straight through to
+    # retry_util.RetryException.
+    self._max_retry = max_retry
+    self._sleep = sleep
+
+  def __enter__(self):
+    fd, self._target_path = tempfile.mkstemp(
+        prefix=self._path + '.chromite.portablelock.')
+    os.close(fd)
+    try:
+      retry_util.RetryException(OSError, self._max_retry,
+                                os.link, self._target_path, self._path,
+                                sleep=self._sleep)
+    except OSError:
+      raise LockNotAcquiredError('Timeout while trying to lock %s' % self._path)
+    finally:
+      osutils.SafeUnlink(self._target_path)
+
+    return self
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    try:
+      if self._target_path:
+        osutils.SafeUnlink(self._target_path)
+    finally:
+      osutils.SafeUnlink(self._path)
+
+
+class PipeLock(object):
+  """A simple one-way lock based on pipe().
+
+  This is used when code is calling os.fork() directly and needs to synchronize
+  behavior between the two processes.  The same process should not try to use
+  Wait/Post as it will just see its own results.  If you need bidirectional
+  locking, you'll need to create two of these yourself.
+
+  Be sure to delete the lock when you're done to prevent fd leakage.
+  """
+
+  def __init__(self):
+    # TODO(vapier): Simplify this when we're Python 3 only.
+    # pylint: disable=using-constant-test
+    pipe2 = getattr(os, 'pipe2', None)
+    if pipe2:
+      cloexec = getattr(os, 'O_CLOEXEC', 0)
+      # Pylint-1.7 is unable to handle this conditional logic.
+      # pylint: disable=not-callable
+      pipes = pipe2(cloexec)
+    else:
+      pipes = os.pipe()
+    self.read_fd, self.write_fd = pipes
+
+  def Wait(self, size=1):
+    """Read |size| bytes from the pipe.
+
+    Args:
+      size: How many bytes to read.  It must match the length of |data| passed
+        by the other end during its call to Post.
+
+    Returns:
+      The data read back.
+    """
+    return os.read(self.read_fd, size)
+
+  def Post(self, data=b'!'):
+    """Write |data| to the pipe.
+
+    Args:
+      data: The data to send to the other side calling Wait.  It must be of the
+        exact length that is passed to Wait.
+    """
+    os.write(self.write_fd, data)
+
+  def __del__(self):
+    os.close(self.read_fd)
+    os.close(self.write_fd)
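+
+
+# Editor's note: an illustrative sketch of PipeLock with os.fork() (not part
+# of the original module).  The child- and parent-side work is hypothetical.
+#
+#   lock = PipeLock()
+#   pid = os.fork()
+#   if pid == 0:
+#     lock.Wait()        # child blocks here until the parent posts
+#     do_child_work()    # hypothetical work depending on the parent's setup
+#     os._exit(0)
+#   else:
+#     do_parent_setup()  # hypothetical setup the child must wait for
+#     lock.Post()        # release the child
+#     del lock           # close the pipe fds when done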
diff --git a/utils/frozen_chromite/lib/metrics.py b/utils/frozen_chromite/lib/metrics.py
new file mode 100644
index 0000000..af47921
--- /dev/null
+++ b/utils/frozen_chromite/lib/metrics.py
@@ -0,0 +1,861 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Wrapper library around ts_mon.
+
+This library provides some wrapper functionality around ts_mon, to make it more
+friendly to developers. It also provides import safety, in case ts_mon is not
+deployed with your code.
+"""
+
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import contextlib
+import ssl
+import time
+from functools import wraps
+
+import six
+from six.moves import queue as Queue
+
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+
+try:
+  from infra_libs import ts_mon
+except (ImportError, RuntimeError):
+  ts_mon = None
+
+
+# This number is chosen because 1.16^100 seconds is about
+# 32 days. This is a good compromise between bucket size
+# and dynamic range.
+_SECONDS_BUCKET_FACTOR = 1.16
+
+# If None, we create metrics in this process. Otherwise, we send metrics via
+# this Queue to a dedicated flushing process.
+# These attributes are set by chromite.lib.ts_mon_config.SetupTsMonGlobalState.
+FLUSHING_PROCESS = None
+MESSAGE_QUEUE = None
+
+_MISSING = object()
+
+MetricCall = collections.namedtuple('MetricCall', [
+    'metric_name', 'metric_args', 'metric_kwargs',
+    'method', 'method_args', 'method_kwargs',
+    'reset_after'
+])
+
+
+def _FlushingProcessClosed():
+  """Returns whether the metrics flushing process has been closed."""
+  return (FLUSHING_PROCESS is not None and
+          FLUSHING_PROCESS.exitcode is not None)
+
+
+class ProxyMetric(object):
+  """Redirects any method calls to the message queue."""
+  def __init__(self, metric, metric_args, metric_kwargs):
+    self.metric = metric
+    self.metric_args = metric_args
+    self.reset_after = metric_kwargs.pop('reset_after', False)
+    self.metric_kwargs = metric_kwargs
+
+  def __getattr__(self, method_name):
+    """Redirects all method calls to the MESSAGE_QUEUE."""
+    def enqueue(*args, **kwargs):
+      if not _FlushingProcessClosed():
+        try:
+          MESSAGE_QUEUE.put_nowait(
+              MetricCall(
+                  metric_name=self.metric,
+                  metric_args=self.metric_args,
+                  metric_kwargs=self.metric_kwargs,
+                  method=method_name,
+                  method_args=args,
+                  method_kwargs=kwargs,
+                  reset_after=self.reset_after))
+        except Queue.Full:
+          logging.warning(
+              "Metrics queue is full; skipped sending metric '%s'",
+              self.metric)
+      else:
+        try:
+          exit_code = FLUSHING_PROCESS.exitcode
+        except AttributeError:
+          exit_code = None
+        logging.warning(
+            'Flushing process has been closed (exit code %s),'
+            " skipped sending metric '%s'",
+            exit_code,
+            self.metric)
+
+    return enqueue
+
+
+def _Indirect(fn):
+  """Decorates a function to be indirect If MESSAGE_QUEUE is set.
+
+  If MESSAGE_QUEUE is set, the indirect function will return a proxy metrics
+  object; otherwise, it behaves normally.
+  """
+  @wraps(fn)
+  def AddToQueueIfPresent(*args, **kwargs):
+    if MESSAGE_QUEUE:
+      return ProxyMetric(fn.__name__, args, kwargs)
+    else:
+      # Whether to reset the metric after the flush; this is only used by
+      # |ProxyMetric|, so remove this from the kwargs.
+      kwargs.pop('reset_after', None)
+      return fn(*args, **kwargs)
+  return AddToQueueIfPresent
+
+
+class MockMetric(object):
+  """Mock metric object, to be returned if ts_mon is not set up."""
+
+  def _mock_method(self, *args, **kwargs):
+    pass
+
+  def __getattr__(self, _):
+    return self._mock_method
+
+
+def _ImportSafe(fn):
+  """Decorator which causes |fn| to return MockMetric if ts_mon not imported."""
+  @wraps(fn)
+  def wrapper(*args, **kwargs):
+    if ts_mon:
+      return fn(*args, **kwargs)
+    else:
+      return MockMetric()
+
+  return wrapper
+
+
+class FieldSpecAdapter(object):
+  """Infers the types of fields values to work around field_spec requirement.
+
+  See: https://chromium-review.googlesource.com/c/432120/ for the change
+  which added a required field_spec argument. This class is a temporary
+  workaround to allow inferring the field_spec if is not provided.
+  """
+  FIELD_CLASSES = {} if ts_mon is None else {
+      bool: ts_mon.BooleanField,
+      int: ts_mon.IntegerField,
+      str: ts_mon.StringField,
+      six.text_type: ts_mon.StringField,
+  }
+
+  def __init__(self, metric_cls, *args, **kwargs):
+    self._metric_cls = metric_cls
+    self._args = args
+    self._kwargs = kwargs
+    self._instance = _MISSING
+
+  def __getattr__(self, prop):
+    """Return a wrapper which constructs the metric object on demand.
+
+    Args:
+      prop: The property name
+
+    Returns:
+      If self._instance has been created, the instance's .|prop| property,
+      otherwise, a wrapper function which creates the ._instance and then
+      calls the |prop| method on the instance.
+    """
+    if self._instance is not _MISSING:
+      return getattr(self._instance, prop)
+
+    def func(*args, **kwargs):
+      if self._instance is not _MISSING:
+        return getattr(self._instance, prop)(*args, **kwargs)
+      fields = FieldSpecAdapter._InferFields(prop, args, kwargs)
+      self._kwargs['field_spec'] = FieldSpecAdapter._InferFieldSpec(fields)
+      self._instance = self._metric_cls(*self._args, **self._kwargs)
+      return getattr(self._instance, prop)(*args, **kwargs)
+
+    func.__name__ = prop
+    return func
+
+  @staticmethod
+  def _InferFields(method_name, args, kwargs):
+    """Infers the fields argument.
+
+    Args:
+      method_name: The method called.
+      args: The args list.
+      kwargs: The keyword args.
+    """
+    if 'fields' in kwargs:
+      return kwargs['fields']
+
+    if method_name == 'increment' and args:
+      return args[0]
+
+    if len(args) >= 2:
+      return args[1]
+
+  @staticmethod
+  def _InferFieldSpec(fields):
+    """Infers the fields types from the given fields.
+
+    Args:
+      fields: A dictionary with metric fields.
+    """
+    if not fields or not ts_mon:
+      return None
+
+    return [FieldSpecAdapter.FIELD_CLASSES[type(v)](field)
+            for (field, v) in sorted(fields.items())]
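+
+  # Editor's note: an illustrative sketch (not part of the original module).
+  # With this adapter in place, a metric created without an explicit
+  # field_spec infers it from the first recorded fields, e.g.:
+  #
+  #   m = CounterMetric('my_tool/attempts')   # no field_spec given
+  #   m.increment(fields={'success': True})
+  #   # -> field_spec inferred as [ts_mon.BooleanField('success')]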
+
+
+def _OptionalFieldSpec(fn):
+  """Decorates a function to allow an optional description and field_spec."""
+  @wraps(fn)
+  def wrapper(*args, **kwargs):
+    kwargs = dict(**kwargs)  # It's bad practice to mutate **kwargs
+    # Slightly different than .setdefault, this line sets a default even when
+    # the key is present (as long as the value is not truthy). Empty or None is
+    # not allowed for descriptions.
+    kwargs['description'] = kwargs.get('description') or 'No description.'
+    if 'field_spec' in kwargs and kwargs['field_spec'] is not _MISSING:
+      return fn(*args, **kwargs)
+    else:
+      return FieldSpecAdapter(fn, *args, **kwargs)
+  return wrapper
+
+
+def _Metric(fn):
+  """A pipeline of decorators to apply to our metric constructors."""
+  return _OptionalFieldSpec(_ImportSafe(_Indirect(fn)))
+
+
+# This is needed for the reset_after flag used by @Indirect.
+# pylint: disable=unused-argument
+
+@_Metric
+def CounterMetric(name, reset_after=False, description=None,
+                  field_spec=_MISSING, start_time=None):
+  """Returns a metric handle for a counter named |name|."""
+  return ts_mon.CounterMetric(name,
+                              description=description, field_spec=field_spec,
+                              start_time=start_time)
+Counter = CounterMetric
+
+
+@_Metric
+def GaugeMetric(name, reset_after=False, description=None, field_spec=_MISSING):
+  """Returns a metric handle for a gauge named |name|."""
+  return ts_mon.GaugeMetric(name, description=description,
+                            field_spec=field_spec)
+Gauge = GaugeMetric
+
+
+@_Metric
+def CumulativeMetric(name, reset_after=False, description=None,
+                     field_spec=_MISSING):
+  """Returns a metric handle for a cumulative float named |name|."""
+  return ts_mon.CumulativeMetric(name, description=description,
+                                 field_spec=field_spec)
+
+
+@_Metric
+def StringMetric(name, reset_after=False, description=None,
+                 field_spec=_MISSING):
+  """Returns a metric handle for a string named |name|."""
+  return ts_mon.StringMetric(name, description=description,
+                             field_spec=field_spec)
+String = StringMetric
+
+
+@_Metric
+def BooleanMetric(name, reset_after=False, description=None,
+                  field_spec=_MISSING):
+  """Returns a metric handle for a boolean named |name|."""
+  return ts_mon.BooleanMetric(name, description=description,
+                              field_spec=field_spec)
+Boolean = BooleanMetric
+
+
+@_Metric
+def FloatMetric(name, reset_after=False, description=None, field_spec=_MISSING):
+  """Returns a metric handle for a float named |name|."""
+  return ts_mon.FloatMetric(name, description=description,
+                            field_spec=field_spec)
+Float = FloatMetric
+
+
+@_Metric
+def CumulativeDistributionMetric(name, reset_after=False, description=None,
+                                 bucketer=None, field_spec=_MISSING):
+  """Returns a metric handle for a cumulative distribution named |name|."""
+  return ts_mon.CumulativeDistributionMetric(
+      name, description=description, bucketer=bucketer, field_spec=field_spec)
+CumulativeDistribution = CumulativeDistributionMetric
+
+
+@_Metric
+def DistributionMetric(name, reset_after=False, description=None,
+                       bucketer=None, field_spec=_MISSING):
+  """Returns a metric handle for a distribution named |name|."""
+  return ts_mon.NonCumulativeDistributionMetric(
+      name, description=description, bucketer=bucketer, field_spec=field_spec)
+Distribution = DistributionMetric
+
+
+@_Metric
+def CumulativeSmallIntegerDistribution(name, reset_after=False,
+                                       description=None, field_spec=_MISSING):
+  """Returns a metric handle for a cumulative distribution named |name|.
+
+  This differs slightly from CumulativeDistribution, in that the underlying
+  metric uses a uniform bucketer rather than a geometric one.
+
+  This metric type is suitable for holding a distribution of numbers that are
+  nonnegative integers in the range of 0 to 100.
+  """
+  return ts_mon.CumulativeDistributionMetric(
+      name,
+      bucketer=ts_mon.FixedWidthBucketer(1),
+      description=description,
+      field_spec=field_spec)
+
+
+@_Metric
+def CumulativeSecondsDistribution(name, scale=1, reset_after=False,
+                                  description=None, field_spec=_MISSING):
+  """Returns a metric handle for a cumulative distribution named |name|.
+
+  The distribution handle returned by this method is better suited than the
+  default one for recording handling times, in seconds.
+
+  This metric handle has bucketing that is optimized for time intervals
+  (in seconds) in the range of 1 second to 32 days. Use |scale| to adjust this
+  (e.g. scale=0.1 covers a range from .1 seconds to 3.2 days).
+
+  Args:
+    name: string name of metric
+    scale: scaling factor of buckets, and size of the first bucket. default: 1
+    reset_after: Should the metric be reset after reporting.
+    description: A string description of the metric.
+    field_spec: A sequence of ts_mon.Field objects to specify the field schema.
+  """
+  b = ts_mon.GeometricBucketer(growth_factor=_SECONDS_BUCKET_FACTOR,
+                               scale=scale)
+  return ts_mon.CumulativeDistributionMetric(
+      name, bucketer=b, units=ts_mon.MetricsDataUnits.SECONDS,
+      description=description, field_spec=field_spec)
+
+SecondsDistribution = CumulativeSecondsDistribution
+
+
+@_Metric
+def PercentageDistribution(
+    name, num_buckets=1000, reset_after=False,
+    description=None, field_spec=_MISSING):
+  """Returns a metric handle for a cumulative distribution for percentage.
+
+  The distribution handle returned by this method is better suited for reporting
+  percentage values than the default one. The bucketing is optimized for values
+  in [0,100].
+
+  Args:
+    name: The name of this metric.
+    num_buckets: This metric buckets the percentage values before
+        reporting. This argument controls the number of buckets the range
+        [0,100] is divided into. The default gives you 0.1% resolution.
+    reset_after: Should the metric be reset after reporting.
+    description: A string description of the metric.
+    field_spec: A sequence of ts_mon.Field objects to specify the field schema.
+  """
+  # The last bucket actually covers [100, 100 + 1.0/num_buckets), so it
+  # corresponds to values that exactly match 100%.
+  bucket_width = 100 / num_buckets
+  b = ts_mon.FixedWidthBucketer(bucket_width, num_buckets)
+  return ts_mon.CumulativeDistributionMetric(
+      name, bucketer=b,
+      description=description, field_spec=field_spec)
+
+
[email protected]
+def SecondsTimer(name, fields=None, description=None, field_spec=_MISSING,
+                 scale=1, record_on_exception=True, add_exception_field=False):
+  """Record the time of an operation to a CumulativeSecondsDistributionMetric.
+
+  Records the time taken inside of the context block, to the
+  CumulativeSecondsDistribution named |name|, with the given fields.
+
+  Examples:
+    # Time the doSomething() call, with field values that are independent of the
+    # results of the operation.
+    with SecondsTimer('timer/name', fields={'foo': 'bar'},
+                      description='My timer',
+                      field_spec=[ts_mon.StringField('foo'),
+                                  ts_mon.BooleanField('success')]):
+      doSomething()
+
+    # Time the doSomethingElse call, with field values that depend on the
+    # results of that operation. Note that it is important that a default value
+    # is specified for these fields, in case an exception is thrown by
+    # doSomethingElse()
+    f = {'success': False, 'foo': 'bar'}
+    with SecondsTimer('timer/name', fields=f, description='My timer',
+                      field_spec=[ts_mon.StringField('foo')]) as c:
+      doSomethingElse()
+      c['success'] = True
+
+    # Incorrect Usage!
+    with SecondsTimer('timer/name', description='My timer') as c:
+      doSomething()
+      c['foo'] = bar # 'foo' is not a valid field, because no default
+                     # value for it was specified in the context constructor.
+                     # It will be silently ignored.
+
+  Args:
+    name: The name of the metric to create
+    fields: The fields of the metric to create.
+    description: A string description of the metric.
+    field_spec: A sequence of ts_mon.Field objects to specify the field schema.
+    scale: A float to scale the CumulativeSecondsDistribution buckets by.
+    record_on_exception: Whether to record metrics if an exception is raised.
+    add_exception_field: Whether to add a BooleanField('encountered_exception')
+        to the FieldSpec provided, and set its value to True iff an exception
+        was raised in the context.
+  """
+  if field_spec is not None and field_spec is not _MISSING:
+    field_spec.append(ts_mon.BooleanField('encountered_exception'))
+
+  m = CumulativeSecondsDistribution(
+      name, scale=scale, description=description, field_spec=field_spec)
+  f = fields or {}
+  f = dict(f)
+  keys = list(f)
+  t0 = _GetSystemClock()
+
+  error = True
+  try:
+    yield f
+    error = False
+  finally:
+    if record_on_exception and add_exception_field:
+      keys.append('encountered_exception')
+      f.setdefault('encountered_exception', error)
+    # Filter out keys that were not part of the initial key set. This is to
+    # avoid inconsistent fields.
+    # TODO(akeshet): Doing this filtering isn't super efficient. Would be better
+    # to implement some key-restricted subclass or wrapper around dict, and just
+    # yield that above rather than yielding a regular dict.
+    if record_on_exception or not error:
+      dt = _GetSystemClock() - t0
+      # TODO(ayatane): Handle backward clock jumps.  See _GetSystemClock.
+      if dt >= 0:
+        m.add(dt, fields={k: f[k] for k in keys})
+
+
+def SecondsTimerDecorator(name, fields=None, description=None,
+                          field_spec=_MISSING, scale=1,
+                          record_on_exception=True, add_exception_field=False):
+  """Decorator to time the duration of function calls.
+
+  Examples:
+    @SecondsTimerDecorator('timer/name', fields={'foo': 'bar'},
+                           description='My timer',
+                           field_spec=[ts_mon.StringField('foo')])
+    def Foo(bar):
+      return doStuff()
+
+    is equivalent to
+
+    def Foo(bar):
+      with SecondsTimer('timer/name', fields={'foo': 'bar'},
+                        description='My timer',
+                        field_spec=[ts_mon.StringField('foo')])
+        return doStuff()
+
+  Args:
+    name: The name of the metric to create
+    fields: The fields of the metric to create
+    description: A string description of the metric.
+    field_spec: A sequence of ts_mon.Field objects to specify the field schema.
+    scale: A float to scale the distribution by.
+    record_on_exception: Whether to record metrics if an exception is raised.
+    add_exception_field: Whether to add a BooleanField('encountered_exception')
+        to the FieldSpec provided, and set its value to True iff an exception
+        was raised in the context.
+  """
+  def decorator(fn):
+    @wraps(fn)
+    def wrapper(*args, **kwargs):
+      with SecondsTimer(name, fields=fields, description=description,
+                        field_spec=field_spec, scale=scale,
+                        record_on_exception=record_on_exception,
+                        add_exception_field=add_exception_field):
+        return fn(*args, **kwargs)
+
+    return wrapper
+
+  return decorator
+
+
[email protected]
+def SecondsInstanceTimer(name, fields=None, description=None,
+                         field_spec=_MISSING, record_on_exception=True,
+                         add_exception_field=False):
+  """Record the time of an operation to a FloatMetric.
+
+  Records the time taken inside of the context block, to the
+  Float metric named |name|, with the given fields.  This is
+  a non-cumulative metric; this represents the absolute time
+  taken for a specific block.  The duration is stored in a float
+  to provide flexibility in the future for higher accuracy.
+
+  Examples:
+    # Time the doSomething() call, with field values that are independent of the
+    # results of the operation.
+    with SecondsInstanceTimer('timer/name', fields={'foo': 'bar'},
+                              description='My timer',
+                              field_spec=[ts_mon.StringField('foo'),
+                                          ts_mon.BooleanField('success')]):
+      doSomething()
+
+    # Time the doSomethingElse call, with field values that depend on the
+    # results of that operation. Note that it is important that a default value
+    # is specified for these fields, in case an exception is thrown by
+    # doSomethingElse()
+    f = {'success': False, 'foo': 'bar'}
+    with SecondsInstanceTimer('timer/name', fields=f, description='My timer',
+                              field_spec=[ts_mon.StringField('foo')]) as c:
+      doSomethingElse()
+      c['success'] = True
+
+    # Incorrect Usage!
+    with SecondsInstanceTimer('timer/name', description='My timer') as c:
+      doSomething()
+      c['foo'] = bar # 'foo' is not a valid field, because no default
+                     # value for it was specified in the context constructor.
+                     # It will be silently ignored.
+
+  Args:
+    name: The name of the metric to create
+    fields: The fields of the metric to create.
+    description: A string description of the metric.
+    field_spec: A sequence of ts_mon.Field objects to specify the field schema.
+    record_on_exception: Whether to record metrics if an exception is raised.
+    add_exception_field: Whether to add a BooleanField('encountered_exception')
+        to the FieldSpec provided, and set its value to True iff an exception
+        was raised in the context.
+
+  Yields:
+    Float-based metric measuring the duration of execution.
+  """
+  if field_spec is not None and field_spec is not _MISSING:
+    field_spec.append(ts_mon.BooleanField('encountered_exception'))
+
+  m = FloatMetric(name, description=description, field_spec=field_spec)
+  f = dict(fields or {})
+  keys = list(f)
+  t0 = _GetSystemClock()
+
+  error = True
+  try:
+    yield f
+    error = False
+  finally:
+    if record_on_exception and add_exception_field:
+      keys.append('encountered_exception')
+      f.setdefault('encountered_exception', error)
+    # Filter out keys that were not part of the initial key set. This is to
+    # avoid inconsistent fields.
+    # TODO(akeshet): Doing this filtering isn't super efficient. Would be better
+    # to implement some key-restricted subclass or wrapper around dict, and just
+    # yield that above rather than yielding a regular dict.
+    if record_on_exception or not error:
+      dt = _GetSystemClock() - t0
+      m.set(dt, fields={k: f[k] for k in keys})
+
+
+def SecondsInstanceTimerDecorator(name, fields=None, description=None,
+                                  field_spec=_MISSING,
+                                  record_on_exception=True,
+                                  add_exception_field=False):
+  """Decorator to time the gauge duration of function calls.
+
+  Examples:
+    @SecondsInstanceTimerDecorator('timer/name', fields={'foo': 'bar'},
+                                   description='My timer',
+                                   field_spec=[ts_mon.StringField('foo'),
+                                               ts_mon.BooleanField('success')]):
+
+    def Foo(bar):
+      return doStuff()
+
+    is equivalent to
+
+    def Foo(bar):
+      with SecondsInstanceTimer('timer/name', fields={'foo': 'bar'},
+                                description='My timer',
+                                field_spec=[ts_mon.StringField('foo'),
+                                            ts_mon.BooleanField('success')]):
+        return doStuff()
+
+  Args:
+    name: The name of the metric to create
+    fields: The fields of the metric to create
+    description: A string description of the metric.
+    field_spec: A sequence of ts_mon.Field objects to specify the field schema.
+    record_on_exception: Whether to record metrics if an exception is raised.
+    add_exception_field: Whether to add a BooleanField('encountered_exception')
+        to the FieldSpec provided, and set its value to True iff an exception
+        was raised in the context.
+
+  Returns:
+    A SecondsInstanceTimer metric decorator.
+  """
+  def decorator(fn):
+    @wraps(fn)
+    def wrapper(*args, **kwargs):
+      with SecondsInstanceTimer(name, fields=fields, description=description,
+                                field_spec=field_spec,
+                                record_on_exception=record_on_exception,
+                                add_exception_field=add_exception_field):
+        return fn(*args, **kwargs)
+
+    return wrapper
+
+  return decorator
+
+
[email protected]
+def SuccessCounter(name, fields=None, description=None, field_spec=_MISSING):
+  """Create a counter that tracks if something succeeds.
+
+  Args:
+    name: The name of the metric to create
+    fields: The fields of the metric
+    description: A string description of the metric.
+    field_spec: A sequence of ts_mon.Field objects to specify the field schema.
+  """
+  c = Counter(name)
+  f = fields or {}
+  f = f.copy()
+  # We add in the additional field success.
+  keys = list(f) + ['success']
+  success = False
+  try:
+    yield f
+    success = True
+  finally:
+    f.setdefault('success', success)
+    f = {k: f[k] for k in keys}
+    c.increment(fields=f)
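+
+# Editor's note: an illustrative usage sketch (not part of the original
+# module).  DoWork and the field values are made up.
+#
+#   with SuccessCounter('my_tool/runs', fields={'branch': 'main'}) as f:
+#     DoWork()                 # if this raises, success=False is recorded
+#     f['branch'] = 'release'  # existing field values may be updated in-block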
+
+
[email protected]
+def Presence(name, fields=None, description=None, field_spec=_MISSING):
+  """A counter of 'active' things.
+
+  This keeps track of how many instances of |name| are active at any given
+  time. However, it's only suitable for long-running tasks, since the initial
+  True value may never be written out if the task doesn't run for at least a
+  minute.
+  """
+  b = Boolean(name, description=description, field_spec=field_spec)
+  b.set(True, fields=fields)
+  try:
+    yield
+  finally:
+    b.set(False, fields=fields)
+
+
+class RuntimeBreakdownTimer(object):
+  """Record the time of an operation and the breakdown into sub-steps.
+
+  Examples:
+    with RuntimeBreakdownTimer('timer/name', fields={'foo':'bar'},
+                               description='My timer',
+                               field_spec=[ts_mon.StringField('foo')]) as timer:
+      with timer.Step('first_step'):
+        doFirstStep()
+      with timer.Step('second_step'):
+        doSecondStep()
+      # The time spent next will show up under .../timer/name/breakdown_no_step
+      doSomeNonStepWork()
+
+  This will emit the following metrics:
+  - .../timer/name/total_duration - A CumulativeSecondsDistribution metric for
+        the time spent inside the outer with block.
+  - .../timer/name/breakdown/first_step and
+    .../timer/name/breakdown/second_step - PercentageDistribution metrics for
+        the fraction of time devoted to each substep.
+  - .../timer/name/breakdown_unaccounted - PercentageDistribution metric for the
+        fraction of time that is not accounted for in any of the substeps.
+  - .../timer/name/bucketing_loss - PercentageDistribution metric buckets values
+        before reporting them as distributions. This causes small errors in the
+        reported values because they are rounded to the reported bucket's lower
+        bound. This is a CumulativeMetric measuring the total rounding error
+        accrued in reporting all the percentages. The worst case bucketing loss
+        for x steps is (x+1)/10. So, if you time across 9 steps, you should
+        expect no more than 1% rounding error.
+  [experimental]
+  - .../timer/name/duration_breakdown - A Float metric, with one stream per Step
+        indicating the ratio of time spent in that step. The different steps are
+        differentiated via a field with key 'step_name'. Since some of the time
+        can be spent outside any steps, these ratios will sum to <= 1.
+
+  NB: This helper can only be used if the field values are known at the
+  beginning of the outer context and do not change as a result of any of the
+  operations timed.
+  """
+
+  PERCENT_BUCKET_COUNT = 1000
+
+  _StepMetrics = collections.namedtuple('_StepMetrics', ['name', 'time_s'])
+
+  def __init__(self, name, fields=None, description=None, field_spec=_MISSING):
+    self._name = name
+    self._fields = fields
+    self._field_spec = field_spec
+    self._description = description
+    self._outer_t0 = None
+    self._total_time_s = 0
+    self._inside_step = False
+    self._step_metrics = []
+
+  def __enter__(self):
+    self._outer_t0 = _GetSystemClock()
+    return self
+
+  def __exit__(self, _type, _value, _traceback):
+    self._RecordTotalTime()
+
+    outer_timer = CumulativeSecondsDistribution(
+        '%s/total_duration' % (self._name,),
+        field_spec=self._field_spec,
+        description=self._description)
+    outer_timer.add(self._total_time_s, fields=self._fields)
+
+    for name, percent in self._GetStepBreakdowns().items():
+      step_metric = PercentageDistribution(
+          '%s/breakdown/%s' % (self._name, name),
+          num_buckets=self.PERCENT_BUCKET_COUNT,
+          field_spec=self._field_spec,
+          description=self._description)
+      step_metric.add(percent, fields=self._fields)
+
+      fields = dict(self._fields) if self._fields is not None else dict()
+      fields['step_name'] = name
+      # TODO(pprabhu): Convert _GetStepBreakdowns() to return ratios instead of
+      # percentage when the old PercentageDistribution reporting is deleted.
+      Float('%s/duration_breakdown' % self._name).set(percent / 100,
+                                                      fields=fields)
+
+    unaccounted_metric = PercentageDistribution(
+        '%s/breakdown_unaccounted' % self._name,
+        num_buckets=self.PERCENT_BUCKET_COUNT,
+        field_spec=self._field_spec,
+        description=self._description)
+    unaccounted_metric.add(self._GetUnaccountedBreakdown(), fields=self._fields)
+
+    bucketing_loss_metric = CumulativeMetric(
+        '%s/bucketing_loss' % self._name,
+        field_spec=self._field_spec,
+        description=self._description)
+    bucketing_loss_metric.increment_by(self._GetBucketingLoss(),
+                                       fields=self._fields)
+
+  @contextlib.contextmanager
+  def Step(self, step_name):
+    """Start a new step named step_name in the timed operation.
+
+    Note that it is not possible to start a step inside a step. i.e.,
+
+    with RuntimeBreakdownTimer('timer') as timer:
+      with timer.Step('outer_step'):
+        with timer.Step('inner_step'):
+          # will by design be dropped, with an error logged.
+
+    Args:
+      step_name: The name of the step being timed.
+    """
+    if self._inside_step:
+      logging.error('RuntimeBreakdownTimer.Step is not reentrant. '
+                    'Dropping step: %s', step_name)
+      yield
+      return
+
+    self._inside_step = True
+    t0 = _GetSystemClock()
+    try:
+      yield
+    finally:
+      self._inside_step = False
+      step_time_s = _GetSystemClock() - t0
+      # TODO(ayatane): Handle backward clock jumps.  See _GetSystemClock.
+      step_time_s = max(0, step_time_s)
+      self._step_metrics.append(self._StepMetrics(step_name, step_time_s))
+
+  def _GetStepBreakdowns(self):
+    """Returns percentage of time spent in each step.
+
+    Must be called after |_RecordTotalTime|.
+    """
+    if not self._total_time_s:
+      return {}
+    return {x.name: (x.time_s * 100) / self._total_time_s
+            for x in self._step_metrics}
+
+  def _GetUnaccountedBreakdown(self):
+    """Returns the percentage time spent outside of all steps.
+
+    Must be called after |_RecordTotalTime|.
+    """
+    breakdown_percentages = sum(self._GetStepBreakdowns().values())
+    return max(0, 100 - breakdown_percentages)
+
+  def _GetBucketingLoss(self):
+    """Compute the actual loss in reported percentages due to bucketing.
+
+    Must be called after |_RecordTotalTime|.
+    """
+    reported = list(self._GetStepBreakdowns().values())
+    reported.append(self._GetUnaccountedBreakdown())
+    bucket_width = 100 / self.PERCENT_BUCKET_COUNT
+    return sum(x % bucket_width for x in reported)
+
+  def _RecordTotalTime(self):
+    self._total_time_s = _GetSystemClock() - self._outer_t0
+    # TODO(ayatane): Handle backward clock jumps.  See _GetSystemClock.
+    self._total_time_s = max(0, self._total_time_s)
+
+
+def _GetSystemClock():
+  """Return a clock time.
+
+  The only thing that the return value can be used for is to subtract from
+  other instances to determine time elapsed.
+  """
+  # TODO(ayatane): We should use a monotonic clock to measure this,
+  # but Python 2 does not have one.
+  return time.time()
+
+
+def Flush(reset_after=()):
+  """Flushes metrics, but warns on transient errors.
+
+  Args:
+    reset_after: A list of metrics to reset after flushing.
+  """
+  if not ts_mon:
+    return
+
+  try:
+    ts_mon.flush()
+    while reset_after:
+      reset_after.pop().reset()
+  except ssl.SSLError as e:
+    logging.warning('Caught transient network error while flushing: %s', e)
+  except Exception as e:
+    logging.error('Caught exception while flushing: %s', e)
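+
+
+# Editor's note: an illustrative sketch (not part of the original module).
+# The metric name is made up; flushing and resetting are only meaningful when
+# ts_mon is available.
+#
+#   c = Counter('my_tool/attempts')
+#   c.increment()
+#   Flush(reset_after=[c])   # flush now, then reset the counter's value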
diff --git a/utils/frozen_chromite/lib/nebraska_wrapper.py b/utils/frozen_chromite/lib/nebraska_wrapper.py
new file mode 100644
index 0000000..b1ff634
--- /dev/null
+++ b/utils/frozen_chromite/lib/nebraska_wrapper.py
@@ -0,0 +1,360 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module containing methods and classes to interact with a nebraska instance.
+"""
+
+from __future__ import print_function
+
+import base64
+import os
+import shutil
+import multiprocessing
+import subprocess
+
+from six.moves import urllib
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import gob_util
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import path_util
+from autotest_lib.utils.frozen_chromite.lib import remote_access
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
+
+
+NEBRASKA_FILENAME = 'nebraska.py'
+
+# Error message seen when a python command fails to load shared libraries.
+ERROR_MSG_IN_LOADING_LIB = 'error while loading shared libraries'
+
+
+class Error(Exception):
+  """Base exception class of nebraska errors."""
+
+
+class NebraskaStartupError(Error):
+  """Thrown when the nebraska fails to start up."""
+
+
+class NebraskaStopError(Error):
+  """Thrown when the nebraska fails to stop."""
+
+
+class RemoteNebraskaWrapper(multiprocessing.Process):
+  """A wrapper for nebraska.py on a remote device.
+
+  We assume there is no chroot on the device, thus we do not launch
+  nebraska inside a chroot.
+  """
+  NEBRASKA_TIMEOUT = 30
+  KILL_TIMEOUT = 10
+
+  # Keep in sync with nebraska.py if not passing these directly to nebraska.
+  RUNTIME_ROOT = '/run/nebraska'
+  PID_FILE_PATH = os.path.join(RUNTIME_ROOT, 'pid')
+  PORT_FILE_PATH = os.path.join(RUNTIME_ROOT, 'port')
+  LOG_FILE_PATH = '/tmp/nebraska.log'
+  REQUEST_LOG_FILE_PATH = '/tmp/nebraska_request_log.json'
+
+  NEBRASKA_PATH = os.path.join('/usr/local/bin', NEBRASKA_FILENAME)
+
+  def __init__(self, remote_device, nebraska_bin=None,
+               update_payloads_address=None, update_metadata_dir=None,
+               install_payloads_address=None, install_metadata_dir=None):
+    """Initializes the nebraska wrapper.
+
+    Args:
+      remote_device: A remote_access.RemoteDevice object.
+      nebraska_bin: The path to the nebraska binary.
+      update_payloads_address: The root address where the payloads will be
+          served.  It can either be a local address (file://) or a remote
+          address (http://).
+      update_metadata_dir: A directory where json files for payloads required
+          for update are located.
+      install_payloads_address: Same as update_payloads_address for install
+          operations.
+      install_metadata_dir: Similar to update_metadata_dir but for install
+          payloads.
+    """
+    super(RemoteNebraskaWrapper, self).__init__()
+
+    self._device = remote_device
+    self._hostname = remote_device.hostname
+
+    self._update_payloads_address = update_payloads_address
+    self._update_metadata_dir = update_metadata_dir
+    self._install_payloads_address = install_payloads_address
+    self._install_metadata_dir = install_metadata_dir
+
+    self._nebraska_bin = nebraska_bin or self.NEBRASKA_PATH
+
+    self._port_file = self.PORT_FILE_PATH
+    self._pid_file = self.PID_FILE_PATH
+    self._log_file = self.LOG_FILE_PATH
+
+    self._port = None
+    self._pid = None
+
+  def _RemoteCommand(self, *args, **kwargs):
+    """Runs a remote shell command.
+
+    Args:
+      *args: See remote_access.RemoteDevice documentation.
+      **kwargs: See remote_access.RemoteDevice documentation.
+    """
+    kwargs.setdefault('debug_level', logging.DEBUG)
+    return self._device.run(*args, **kwargs)
+
+  def _PortFileExists(self):
+    """Checks whether the port file exists in the remove device or not."""
+    result = self._RemoteCommand(
+        ['test', '-f', self._port_file], check=False)
+    return result.returncode == 0
+
+  def _ReadPortNumber(self):
+    """Reads the port number from the port file on the remote device."""
+    if not self.is_alive():
+      raise NebraskaStartupError('Nebraska is not alive, so no port file yet!')
+
+    try:
+      timeout_util.WaitForReturnTrue(self._PortFileExists, period=5,
+                                     timeout=self.NEBRASKA_TIMEOUT)
+    except timeout_util.TimeoutError:
+      self.terminate()
+      raise NebraskaStartupError('Timeout (%s) waiting for remote nebraska'
+                                 ' port_file' % self.NEBRASKA_TIMEOUT)
+
+    self._port = int(self._RemoteCommand(
+        ['cat', self._port_file], capture_output=True).output.strip())
+
+  def IsReady(self):
+    """Returns True if nebraska is ready to accept requests."""
+    if not self.is_alive():
+      raise NebraskaStartupError('Nebraska is not alive, so not ready!')
+
+    url = 'http://%s:%d/%s' % (remote_access.LOCALHOST_IP, self._port,
+                               'health_check')
+    # Running curl through SSH because the port on the device is not accessible
+    # by default.
+    result = self._RemoteCommand(
+        ['curl', url, '-o', '/dev/null'], check=False)
+    return result.returncode == 0
+
+  def _WaitUntilStarted(self):
+    """Wait until the nebraska has started."""
+    if not self._port:
+      self._ReadPortNumber()
+
+    try:
+      timeout_util.WaitForReturnTrue(self.IsReady,
+                                     timeout=self.NEBRASKA_TIMEOUT,
+                                     period=5)
+    except timeout_util.TimeoutError:
+      raise NebraskaStartupError('Nebraska did not start.')
+
+    self._pid = int(self._RemoteCommand(
+        ['cat', self._pid_file], capture_output=True).output.strip())
+    logging.info('Started nebraska with pid %s', self._pid)
+
+  def run(self):
+    """Launches a nebraska process on the device.
+
+    Starts a background nebraska and waits for it to finish.
+    """
+    logging.info('Starting nebraska on %s', self._hostname)
+
+    if not self._update_metadata_dir:
+      raise NebraskaStartupError(
+          'Update metadata directory location is not passed.')
+
+    cmd = [
+        'python', self._nebraska_bin,
+        '--update-metadata', self._update_metadata_dir,
+    ]
+
+    if self._update_payloads_address:
+      cmd += ['--update-payloads-address', self._update_payloads_address]
+    if self._install_metadata_dir:
+      cmd += ['--install-metadata', self._install_metadata_dir]
+    if self._install_payloads_address:
+      cmd += ['--install-payloads-address', self._install_payloads_address]
+
+    try:
+      self._RemoteCommand(cmd, stdout=True, stderr=subprocess.STDOUT)
+    except cros_build_lib.RunCommandError as err:
+      msg = 'Remote nebraska failed (to start): %s' % str(err)
+      logging.error(msg)
+      raise NebraskaStartupError(msg)
+
+  def Start(self):
+    """Starts the nebraska process remotely on the remote device."""
+    if self.is_alive():
+      logging.warning('Nebraska is already running, not running again.')
+      return
+
+    self.start()
+    self._WaitUntilStarted()
+
+  def Stop(self):
+    """Stops the nebraska instance if its running.
+
+    Kills the nebraska instance with SIGTERM (and SIGKILL if SIGTERM fails).
+    """
+    logging.debug('Stopping nebraska instance with pid %s', self._pid)
+    if self.is_alive():
+      self._RemoteCommand(['kill', str(self._pid)], check=False)
+    else:
+      logging.debug('Nebraska is not running, stopping nothing!')
+      return
+
+    self.join(self.KILL_TIMEOUT)
+    if self.is_alive():
+      logging.warning('Nebraska is unstoppable. Killing with SIGKILL.')
+      try:
+        self._RemoteCommand(['kill', '-9', str(self._pid)])
+      except cros_build_lib.RunCommandError as e:
+        raise NebraskaStopError('Unable to stop Nebraska: %s' % e)
+
+  def GetURL(self, ip=remote_access.LOCALHOST_IP,
+             critical_update=False, no_update=False):
+    """Returns the URL which the devserver is running on.
+
+    Args:
+      ip: The ip of running nebraska if different than localhost.
+      critical_update: Whether nebraska has to instruct the update_engine that
+          the update is a critical one or not.
+      no_update: Whether nebraska has to give a noupdate response even if it
+          detected an update.
+
+    Returns:
+      An HTTP URL that can be passed to the update_engine_client in --omaha_url
+          flag.
+    """
+    query_dict = {}
+    if critical_update:
+      query_dict['critical_update'] = True
+    if no_update:
+      query_dict['no_update'] = True
+    query_string = urllib.parse.urlencode(query_dict)
+
+    return ('http://%s:%d/update/%s' %
+            (ip, self._port, (('?%s' % query_string) if query_string else '')))
+
+  def PrintLog(self):
+    """Print Nebraska log to stdout."""
+    if self._RemoteCommand(
+        ['test', '-f', self._log_file], check=False).returncode != 0:
+      logging.error('Nebraska log file %s does not exist on the device.',
+                    self._log_file)
+      return
+
+    result = self._RemoteCommand(['cat', self._log_file], capture_output=True)
+    output = '--- Start output from %s ---\n' % self._log_file
+    output += result.output
+    output += '--- End output from %s ---' % self._log_file
+    return output
+
+  def CollectLogs(self, target_log):
+    """Copies the nebraska logs from the device.
+
+    Args:
+      target_log: The file to copy the log to from the device.
+    """
+    try:
+      self._device.CopyFromDevice(self._log_file, target_log)
+    except (remote_access.RemoteAccessException,
+            cros_build_lib.RunCommandError) as err:
+      logging.error('Failed to copy nebraska logs from device, ignoring: %s',
+                    str(err))
+
+  def CollectRequestLogs(self, target_log):
+    """Copies the nebraska logs from the device.
+
+    Args:
+      target_log: The file to write the log to.
+    """
+    if not self.is_alive():
+      return
+
+    request_log_url = 'http://%s:%d/requestlog' % (remote_access.LOCALHOST_IP,
+                                                   self._port)
+    try:
+      self._RemoteCommand(
+          ['curl', request_log_url, '-o', self.REQUEST_LOG_FILE_PATH])
+      self._device.CopyFromDevice(self.REQUEST_LOG_FILE_PATH, target_log)
+    except (remote_access.RemoteAccessException,
+            cros_build_lib.RunCommandError) as err:
+      logging.error('Failed to get requestlog from nebraska. ignoring: %s',
+                    str(err))
+
+  def CheckNebraskaCanRun(self):
+    """Checks to see if we can start nebraska.
+
+    If the stateful partition is corrupted, Python or other packages needed for
+    rootfs update may be missing on |device|.
+
+    This will also use `ldconfig` to update library paths on the target
+    device if it looks like that's causing problems, which is necessary
+    for base images.
+
+    Raise NebraskaStartupError if nebraska cannot start.
+    """
+
+    # Try to capture the output from the command so we can dump it in the case
+    # of errors. Note that this will not work if we were requested to redirect
+    # logs to a |log_file|.
+    cmd_kwargs = {'capture_output': True, 'stderr': subprocess.STDOUT}
+    cmd = ['python', self._nebraska_bin, '--help']
+    logging.info('Checking if we can run nebraska on the device...')
+    try:
+      self._RemoteCommand(cmd, **cmd_kwargs)
+    except cros_build_lib.RunCommandError as e:
+      logging.warning('Cannot start nebraska.')
+      logging.warning(e.result.error)
+      if ERROR_MSG_IN_LOADING_LIB in str(e):
+        logging.info('Attempting to correct device library paths...')
+        try:
+          self._RemoteCommand(['ldconfig'], **cmd_kwargs)
+          self._RemoteCommand(cmd, **cmd_kwargs)
+          logging.info('Library path correction successful.')
+          return
+        except cros_build_lib.RunCommandError as e2:
+          logging.warning('Library path correction failed:')
+          logging.warning(e2.result.error)
+          raise NebraskaStartupError(e.result.error)
+
+      raise NebraskaStartupError(str(e))
+
+  @staticmethod
+  def GetNebraskaSrcFile(source_dir):
+    """Returns path to nebraska source file.
+
+    nebraska is copied to source_dir, either from a local file or by
+    downloading from googlesource.com.
+    """
+    assert os.path.isdir(source_dir), ('%s must be a valid directory.'
+                                       % source_dir)
+
+    nebraska_path = os.path.join(source_dir, NEBRASKA_FILENAME)
+    checkout = path_util.DetermineCheckout()
+    if checkout.type == path_util.CHECKOUT_TYPE_REPO:
+      # ChromeOS checkout. Copy existing file to destination.
+      local_src = os.path.join(constants.SOURCE_ROOT, 'src', 'platform',
+                               'dev', 'nebraska', NEBRASKA_FILENAME)
+      assert os.path.isfile(local_src), "%s doesn't exist" % local_src
+      shutil.copy2(local_src, source_dir)
+    else:
+      # Download from googlesource.
+      nebraska_url_path = '%s/+/%s/%s?format=text' % (
+          'chromiumos/platform/dev-util', 'refs/heads/master',
+          'nebraska/nebraska.py')
+      contents_b64 = gob_util.FetchUrl(constants.EXTERNAL_GOB_HOST,
+                                       nebraska_url_path)
+      osutils.WriteFile(nebraska_path,
+                        base64.b64decode(contents_b64).decode('utf-8'))
+
+    return nebraska_path
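+
+
+# A minimal usage sketch (hypothetical; not part of the original module): one
+# possible lifecycle for the wrapper.  The payload address, metadata directory,
+# log destination and keyword-argument names are illustrative assumptions.
+def _example_nebraska_session(device):  # pragma: no cover
+  """Runs a hypothetical update session against |device| (a RemoteDevice)."""
+  nebraska = RemoteNebraskaWrapper(
+      device,
+      update_payloads_address='http://example.test/static',
+      update_metadata_dir='/usr/local/nebraska/metadata')
+  # Fail early if the DUT cannot even launch nebraska.
+  nebraska.CheckNebraskaCanRun()
+  nebraska.Start()
+  try:
+    # The returned URL is what update_engine_client would take as --omaha_url.
+    return nebraska.GetURL(critical_update=True)
+  finally:
+    nebraska.CollectLogs('/tmp/nebraska.log')
+    nebraska.Stop()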
diff --git a/utils/frozen_chromite/lib/operation.py b/utils/frozen_chromite/lib/operation.py
new file mode 100644
index 0000000..c3595c4
--- /dev/null
+++ b/utils/frozen_chromite/lib/operation.py
@@ -0,0 +1,689 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Operation, including output and progress display
+
+This module implements the concept of an operation, which has regular progress
+updates, verbose text display and perhaps some errors.
+"""
+
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import contextlib
+import fcntl
+import multiprocessing
+import os
+import pty
+import re
+import struct
+import sys
+import termios
+
+from six.moves import queue as Queue
+
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import parallel
+from autotest_lib.utils.frozen_chromite.lib.terminal import Color
+from autotest_lib.utils.frozen_chromite.utils import outcap
+
+
+# Define filenames for captured stdout and stderr.
+STDOUT_FILE = 'stdout'
+STDERR_FILE = 'stderr'
+
+_TerminalSize = collections.namedtuple('_TerminalSize', ('lines', 'columns'))
+
+
+class _BackgroundTaskComplete(object):
+  """Sentinal object to indicate that the background task is complete."""
+
+
+class ProgressBarOperation(object):
+  """Wrapper around long running functions to show progress.
+
+  This class is intended to capture the output of a long-running function,
+  parse the output, and display a progress bar.
+
+  To display a progress bar for a function foo with argument foo_args, this is
+  the usage case:
+    1) Create a class that inherits from ProgressBarOperation (e.g.
+    FooTypeOperation. In this class, override the ParseOutput method to parse
+    the output of foo.
+    2) op = operation.FooTypeOperation()
+       op.Run(foo, foo_args)
+  """
+
+  # Subtract 10 characters from the width of the terminal because these are used
+  # to display the percentage as well as other spaces.
+  _PROGRESS_BAR_BORDER_SIZE = 10
+
+  # By default, update the progress bar every 100 ms.
+  _PROGRESS_BAR_UPDATE_INTERVAL = 0.1
+
+  def __init__(self):
+    self._queue = multiprocessing.Queue()
+    self._stderr = None
+    self._stdout = None
+    self._stdout_path = None
+    self._stderr_path = None
+    self._progress_bar_displayed = False
+    self._isatty = os.isatty(sys.stdout.fileno())
+
+  def _GetTerminalSize(self, fd=pty.STDOUT_FILENO):
+    """Return a terminal size object for |fd|.
+
+    Note: Replace with os.get_terminal_size() in Python 3.3+.
+    """
+    winsize = struct.pack('HHHH', 0, 0, 0, 0)
+    data = fcntl.ioctl(fd, termios.TIOCGWINSZ, winsize)
+    winsize = struct.unpack('HHHH', data)
+    return _TerminalSize(int(winsize[0]), int(winsize[1]))
+
+  def ProgressBar(self, progress):
+    """This method creates and displays a progress bar.
+
+    If not in a terminal, we do not display a progress bar.
+
+    Args:
+      progress: a float between 0 and 1 that represents the fraction of the
+        current progress.
+    """
+    if not self._isatty:
+      return
+    self._progress_bar_displayed = True
+    progress = max(0.0, min(1.0, progress))
+    width = max(1, self._GetTerminalSize().columns -
+                self._PROGRESS_BAR_BORDER_SIZE)
+    block = int(width * progress)
+    shaded = '#' * block
+    unshaded = '-' * (width - block)
+    text = '\r [%s%s] %d%%' % (shaded, unshaded, progress * 100)
+    sys.stdout.write(text)
+    sys.stdout.flush()
+
+  def OpenStdoutStderr(self):
+    """Open the stdout and stderr streams."""
+    if self._stdout is None and self._stderr is None:
+      self._stdout = open(self._stdout_path, 'r')
+      self._stderr = open(self._stderr_path, 'r')
+
+  def Cleanup(self):
+    """Method to cleanup progress bar.
+
+    If progress bar has been printed, then we make sure it displays 100% before
+    exiting.
+    """
+    if self._progress_bar_displayed:
+      self.ProgressBar(1)
+      sys.stdout.write('\n')
+      sys.stdout.flush()
+
+  def ParseOutput(self, output=None):
+    """Method to parse output and update progress bar.
+
+    This method should be overridden to read and parse the lines in _stdout and
+    _stderr.
+
+    One example use of this method could be to detect 'foo' in stdout and
+    increment the progress bar every time foo is seen.
+
+    def ParseOutput(self):
+      stdout = self._stdout.read()
+      if 'foo' in stdout:
+        # Increment progress bar.
+
+    Args:
+      output: Pass in output to parse instead of reading from self._stdout and
+        self._stderr.
+    """
+    raise NotImplementedError('Subclass must override this method.')
+
+  # TODO(ralphnathan): Deprecate this function and use parallel._BackgroundTask
+  # instead (brbug.com/863)
+  def WaitUntilComplete(self, update_period):
+    """Return True if running background task has completed."""
+    try:
+      x = self._queue.get(timeout=update_period)
+      if isinstance(x, _BackgroundTaskComplete):
+        return True
+    except Queue.Empty:
+      return False
+
+  def CaptureOutputInBackground(self, func, *args, **kwargs):
+    """Launch func in background and capture its output.
+
+    Args:
+      func: Function to execute in the background and whose output is to be
+        captured.
+      log_level: Logging level to run the func at. By default, it runs at log
+        level info.
+    """
+    log_level = kwargs.pop('log_level', logging.INFO)
+    restore_log_level = logging.getLogger().getEffectiveLevel()
+    logging.getLogger().setLevel(log_level)
+    try:
+      with outcap.OutputCapturer(
+          stdout_path=self._stdout_path, stderr_path=self._stderr_path,
+          quiet_fail=False):
+        func(*args, **kwargs)
+    finally:
+      self._queue.put(_BackgroundTaskComplete())
+      logging.getLogger().setLevel(restore_log_level)
+
+  # TODO (ralphnathan): Store PID of spawned process.
+  def Run(self, func, *args, **kwargs):
+    """Run func, parse its output, and update the progress bar.
+
+    Args:
+      func: Function to execute in the background and whose output is to be
+        captured.
+      update_period: Optional argument to specify the period that output should
+        be read.
+      log_level: Logging level to run the func at. By default, it runs at log
+        level info.
+    """
+    update_period = kwargs.pop('update_period',
+                               self._PROGRESS_BAR_UPDATE_INTERVAL)
+
+    # If we are not running in a terminal device, do not display the progress
+    # bar.
+    if not self._isatty:
+      log_level = kwargs.pop('log_level', logging.INFO)
+      restore_log_level = logging.getLogger().getEffectiveLevel()
+      logging.getLogger().setLevel(log_level)
+      try:
+        func(*args, **kwargs)
+      finally:
+        logging.getLogger().setLevel(restore_log_level)
+      return
+
+    with osutils.TempDir() as tempdir:
+      self._stdout_path = os.path.join(tempdir, STDOUT_FILE)
+      self._stderr_path = os.path.join(tempdir, STDERR_FILE)
+      osutils.Touch(self._stdout_path)
+      osutils.Touch(self._stderr_path)
+      try:
+        with parallel.BackgroundTaskRunner(
+            self.CaptureOutputInBackground, func, *args, **kwargs) as queue:
+          queue.put([])
+          self.OpenStdoutStderr()
+          while True:
+            self.ParseOutput()
+            if self.WaitUntilComplete(update_period):
+              break
+        # Before we exit, parse the output again to update progress bar.
+        self.ParseOutput()
+        # Final sanity check to update the progress bar to 100% if it was used
+        # by ParseOutput
+        self.Cleanup()
+      except:
+        # Add a blank line before the logging message so the message isn't
+        # touching the progress bar.
+        sys.stdout.write('\n')
+        logging.error('Oops. Something went wrong.')
+        # Raise the exception so it can be caught again.
+        raise
+
+
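+# A minimal sketch (hypothetical; not part of the original module) of the
+# subclass-and-Run pattern described in the ProgressBarOperation docstring.
+# The 'done' marker and step count are illustrative assumptions.
+class _ExampleStepOperation(ProgressBarOperation):  # pragma: no cover
+  """Advances the progress bar each time the wrapped function prints 'done'."""
+
+  _TOTAL_STEPS = 10
+
+  def __init__(self):
+    super(_ExampleStepOperation, self).__init__()
+    self._steps_seen = 0
+
+  def ParseOutput(self, output=None):
+    if output is None:
+      output = self._stdout.read() + self._stderr.read()
+    self._steps_seen += output.count('done')
+    self.ProgressBar(min(1.0, self._steps_seen / float(self._TOTAL_STEPS)))
+
+
+def _example_run_with_progress(func, *args):  # pragma: no cover
+  """Sketch: run |func| in the background while showing the bar above."""
+  _ExampleStepOperation().Run(func, *args, update_period=0.5)
+
+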
+class ParallelEmergeOperation(ProgressBarOperation):
+  """ProgressBarOperation specific for scripts/parallel_emerge.py."""
+
+  def __init__(self):
+    super(ParallelEmergeOperation, self).__init__()
+    self._total = None
+    self._completed = 0
+    self._printed_no_packages = False
+    self._events = ['Fetched ', 'Completed ']
+    self._msg = None
+
+  def _GetTotal(self, output):
+    """Get total packages by looking for Total: digits packages."""
+    match = re.search(r'Total: (\d+) packages', output)
+    return int(match.group(1)) if match else None
+
+  def SetProgressBarMessage(self, msg):
+    """Message to be shown before the progress bar is displayed with 0%.
+
+       The message is not displayed if the progress bar is not going to be
+       displayed.
+    """
+    self._msg = msg
+
+  def ParseOutput(self, output=None):
+    """Parse the output of emerge to determine how to update progress bar.
+
+    1) Figure out how many packages exist. If the total number of packages to be
+    built is zero, then we do not display the progress bar.
+    2) Whenever a package is downloaded or built, 'Fetched' and 'Completed' are
+    printed respectively. By counting 'Fetched's and 'Completed's, we can
+    determine how much to advance the progress bar.
+
+    Args:
+      output: Pass in output to parse instead of reading from self._stdout and
+        self._stderr.
+
+    Returns:
+      A fraction between 0 and 1 indicating the level of the progress bar. If
+      the progress bar isn't displayed, then the return value is -1.
+    """
+    if output is None:
+      stdout = self._stdout.read()
+      stderr = self._stderr.read()
+      output = stdout + stderr
+
+    if self._total is None:
+      temp = self._GetTotal(output)
+      if temp is not None:
+        self._total = temp * len(self._events)
+        if self._msg is not None:
+          logging.notice(self._msg)
+
+    for event in self._events:
+      self._completed += output.count(event)
+
+    if not self._printed_no_packages and self._total == 0:
+      logging.notice('No packages to build.')
+      self._printed_no_packages = True
+
+    if self._total:
+      progress = self._completed / self._total
+      self.ProgressBar(progress)
+      return progress
+    else:
+      return -1
+
+
+# TODO(sjg): When !isatty(), keep stdout and stderr separate so they can be
+# redirected separately
+# TODO(sjg): Add proper docs to this file
+# TODO(sjg): Handle stdin wait in quiet mode, rather than silently stalling
+
+class Operation(object):
+  """Class which controls stdio and progress of an operation in progress.
+
+  This class is created to handle stdio for a running subprocess. It filters
+  the output looking for errors and progress information. Optionally it can
+  echo stderr and stdout to the terminal, but they are normally suppressed.
+
+  Progress information is garnered from the subprocess output based on
+  knowledge of the legacy scripts, but at some point will move over to using
+  real progress information reported through new python methods which will
+  replace the scripts.
+
+  Each operation has a name, and this class handles displaying this name
+  as it reports progress.
+
+  Operation Objects
+  =================
+
+  verbose: True / False
+    In verbose mode all output from subprocesses is displayed; otherwise
+    this output is normally suppressed, unless we think it indicates an error.
+
+  progress: True / False
+    The output from subprocesses can be analysed in a very basic manner to
+    try to present progress information to the user.
+
+  explicit_verbose: True / False
+    False if we are not just using default verbosity. In that case we allow
+    verbosity to be enabled on request, since the user has not explicitly
+    disabled it. This is used by commands that the user issues with the
+    expectation that output would ordinarily be visible.
+  """
+
+  def __init__(self, name, color=None):
+    """Create a new operation.
+
+    Args:
+      name: Operation name in a form to be displayed for the user.
+      color: Determines policy for sending color to stdout; see terminal.Color
+        for details on interpretation on the value.
+    """
+    self._name = name   # Operation name.
+    self.verbose = False   # True to echo subprocess output.
+    self.progress = True   # True to report progress of the operation
+    self._column = 0    # Current output column (always 0 unless verbose).
+    self._update_len = 0    # Length of last progress update message.
+    self._line = ''   # text of current line, so far
+    self.explicit_verbose = False
+
+    self._color = Color(enabled=color)
+
+    # -1 = no newline pending
+    #  n = newline pending, and line length of last line was n
+    self._pending_nl = -1
+
+    # the type of the last stream to emit data on the current line;
+    # can be sys.stdout, sys.stderr (both from the subprocess), or None
+    # for our own messages
+    self._cur_stream = None
+
+    self._error_count = 0   # number of error lines we have reported
+
+  def __del__(self):
+    """Object is about to be destroyed, so finish out output cleanly."""
+    self.FinishOutput()
+
+  def FinishOutput(self):
+    """Finish off any pending output.
+
+    This finishes any output line currently in progress and resets the color
+    back to normal.
+    """
+    self._FinishLine(self.verbose, final=True)
+    if self._column and self.verbose:
+      print(self._color.Stop())
+      self._column = 0
+
+  def WereErrorsDetected(self):
+    """Returns whether any errors have been detected.
+
+    Returns:
+      True if any errors have been detected in subprocess output so far.
+      False otherwise
+    """
+    return self._error_count > 0
+
+  def SetName(self, name):
+    """Set the name of the operation as displayed to the user.
+
+    Args:
+      name: Operation name.
+    """
+    self._name = name
+
+  def _FilterOutputForErrors(self, line, print_error):
+    """Filter a line of output to look for and display errors.
+
+    This uses a few regular expression searches to spot common error reports
+    from subprocesses. A count of these is kept so we know how many occurred.
+    Optionally they are displayed in red on the terminal.
+
+    Args:
+      line: the output line to filter, as a string.
+      print_error: True to print the error, False to just record it.
+    """
+    bad_things = ['Cannot GET', 'ERROR', '!!!', 'FAILED']
+    for bad_thing in bad_things:
+      if re.search(bad_thing, line, flags=re.IGNORECASE):
+        self._error_count += 1
+        if print_error:
+          print(self._color.Color(self._color.RED, line))
+          break
+
+  def _FilterOutputForProgress(self, line):
+    """Filter a line of output to look for and dispay progress information.
+
+    This uses a simple regular expression search to spot progress information
+    coming from subprocesses. This is sent to the _Progress() method.
+
+    Args:
+      line: the output line to filter, as a string.
+    """
+    match = re.match(r'Pending (\d+).*Total (\d+)', line)
+    if match:
+      pending = int(match.group(1))
+      total = int(match.group(2))
+      self._Progress(total - pending, total)
+
+  def _Progress(self, upto, total):
+    """Record and optionally display progress information.
+
+    Args:
+      upto: which step we are up to in the operation (integer, from 0).
+      total: total number of steps in the operation.
+    """
+    if total > 0:
+      update_str = '%s...%d%% (%d of %d)' % (self._name,
+                                             upto * 100 // total, upto, total)
+      if self.progress:
+        # Finish the current line, print progress, and remember its length.
+        self._FinishLine(self.verbose)
+
+        # Sometimes the progress string shrinks and in this case we need to
+        # blank out the characters at the end of the line that will not be
+        # overwritten by the new line
+        pad = max(self._update_len - len(update_str), 0)
+        sys.stdout.write(update_str + (' ' * pad) + '\r')
+        self._update_len = len(update_str)
+
+  def _FinishLine(self, display, final=False):
+    """Finish off the current line and prepare to start a new one.
+
+    If a new line is pending from the previous line, then this will be output,
+    along with a color reset if needed.
+
+    We also handle removing progress messages from the output. This is done
+    using a carriage return character, followed by spaces.
+
+    Args:
+      display: True to display output, False to suppress it
+      final: True if this is the final output before we exit, in which case
+          we must clean up any remaining progress message by overwriting
+          it with spaces, then carriage return
+    """
+    if display:
+      if self._pending_nl != -1:
+        # If our last output line was shorter than the progress info,
+        # add spaces.
+        if self._pending_nl < self._update_len:
+          print(' ' * (self._update_len - self._pending_nl), end='')
+
+        # Output the newline, and reset our counter.
+        sys.stdout.write(self._color.Stop())
+        print()
+
+    # If this is the last thing that this operation will print, we need to
+    # close things off. So if there is some text on the current line but not
+    # enough to overwrite all the progress information we have sent, add some
+    # more spaces.
+    if final and self._update_len:
+      print(' ' * self._update_len, '\r', end='')
+
+    self._pending_nl = -1
+
+  def _CheckStreamAndColor(self, stream, display):
+    """Check that we're writing to the same stream as last call.  No?  New line.
+
+    If starting a new line, set the color correctly:
+      stdout  Magenta
+      stderr  Red
+      other   White / no colors
+
+    Args:
+      stream: The stream we're going to write to.
+      display: True to display it on terms, False to suppress it.
+    """
+    if self._column > 0 and stream != self._cur_stream:
+      self._FinishLine(display)
+      if display:
+        print(self._color.Stop())
+
+      self._column = 0
+      self._line = ''
+
+    # Use colors for child output.
+    if self._column == 0:
+      self._FinishLine(display)
+      if display:
+        color = None
+        if stream == sys.stdout:
+          color = self._color.MAGENTA
+        elif stream == sys.stderr:
+          color = self._color.RED
+        if color:
+          sys.stdout.write(self._color.Start(color))
+
+      self._cur_stream = stream
+
+  def _Out(self, stream, text, display, newline=False, do_output_filter=True):
+    """Output some text received from a child, or generated internally.
+
+    This method is the guts of the Operation class since it understands how to
+    convert a series of output requests on different streams into something
+    coherent for the user.
+
+    If the stream has changed, then a new line is started even if we were
+    still halfway through the previous line. This prevents stdout and stderr
+    becoming mixed up quite so badly.
+
+    We use color to indicate lines which are stdout and stderr. If the output
+    received from the child has color codes in it already, we pass these
+    through, so our colors can be overridden. If output is redirected then we
+    do not add color by default. Note that nothing stops the child from adding
+    it, but since we present ourselves as a terminal to the child, one might
+    hope that the child will not generate color.
+
+    If display is False, then we will not actually send this text to the
+    terminal. This is used when verbose is required to be False.
+
+    Args:
+      stream: stream on which the text was received:
+        sys.stdout    - received on stdout
+        sys.stderr    - received on stderr
+        None          - generated by us / internally
+      text: text to output
+      display: True to display it on terms, False to suppress it
+      newline: True to start a new line after this text, False to put the next
+        lot of output immediately after this.
+      do_output_filter: True to look through output for errors and progress.
+    """
+    self._CheckStreamAndColor(stream, display)
+
+    # Output what we have, and remember what column we are up to.
+    if display:
+      sys.stdout.write(text)
+      self._column += len(text)
+      # If a newline is required, remember to output it later.
+      if newline:
+        self._pending_nl = self._column
+        self._column = 0
+
+    self._line += text
+
+    # If we now have a whole line, check it for errors and progress.
+    if newline:
+      if do_output_filter:
+        self._FilterOutputForErrors(self._line, print_error=not display)
+        self._FilterOutputForProgress(self._line)
+      self._line = ''
+
+  def Output(self, stream, data):
+    r"""Handle the output of a block of text from the subprocess.
+
+    All subprocess output should be sent through this method. It is split into
+    lines which are processed separately using the _Out() method.
+
+    Args:
+      stream: Which file the output came in on:
+        sys.stdout: stdout
+        sys.stderr: stderr
+        None: Our own internal output
+      data: Output data as a big string, potentially containing many lines of
+        text. Each line should end with \r\n. There is no requirement to send
+        whole lines - this method happily handles fragments and tries to
+        present them to the user as early as possible.
+
+    #TODO(sjg): Just use a list as the input parameter to avoid the split.
+    """
+    # We cannot use splitlines() here as we need this exact behavior
+    lines = data.split('\r\n')
+
+    # Output each full line, with a \n after it.
+    for line in lines[:-1]:
+      self._Out(stream, line, display=self.verbose, newline=True)
+
+    # If we have a partial line at the end, output what we have.
+    # We will continue it later.
+    if lines[-1]:
+      self._Out(stream, lines[-1], display=self.verbose)
+
+    # Flush so that the terminal will receive partial line output (now!)
+    sys.stdout.flush()
+
+  def Outline(self, line):
+    r"""Output a line of text to the display.
+
+    This outputs text generated internally, such as a warning message or error
+    summary. It ensures that our message plays nicely with child output if
+    any.
+
+    Args:
+      line: text to output (without \n on the end)
+    """
+    self._Out(None, line, display=True, newline=True)
+    self._FinishLine(display=True)
+
+  def Info(self, line):
+    r"""Output a line of information text to the display in verbose mode.
+
+    Args:
+      line: text to output (without \n on the end)
+    """
+    self._Out(None, self._color.Color(self._color.BLUE, line),
+              display=self.verbose, newline=True, do_output_filter=False)
+    self._FinishLine(display=True)
+
+  def Notice(self, line):
+    r"""Output a line of notification text to the display.
+
+    Args:
+      line: text to output (without \n on the end)
+    """
+    self._Out(None, self._color.Color(self._color.GREEN, line),
+              display=True, newline=True, do_output_filter=False)
+    self._FinishLine(display=True)
+
+  def Warning(self, line):
+    r"""Output a line of warning text to the display.
+
+    Args:
+      line: text to output (without \n on the end)
+    """
+    self._Out(None, self._color.Color(self._color.YELLOW, line),
+              display=True, newline=True, do_output_filter=False)
+    self._FinishLine(display=True)
+
+  def Error(self, line):
+    r"""Output a line of error text to the display.
+
+    Args:
+      line: text to output (without \n on the end)
+    """
+    self._Out(None, self._color.Color(self._color.RED, line),
+              display=True, newline=True, do_output_filter=False)
+    self._FinishLine(display=True)
+
+  def Die(self, line):
+    r"""Output a line of error text to the display and die.
+
+    Args:
+      line: text to output (without \n on the end)
+    """
+    self.Error(line)
+    sys.exit(1)
+
+  @contextlib.contextmanager
+  def RequestVerbose(self, request):
+    """Perform something in verbose mode if the user hasn't disallowed it
+
+    This is intended to be used with something like:
+
+      with oper.RequestVerbose(True):
+        ... do some things that generate output
+
+    Args:
+      request: True to request verbose mode if available, False to do nothing.
+    """
+    old_verbose = self.verbose
+    if request and not self.explicit_verbose:
+      self.verbose = True
+    try:
+      yield
+    finally:
+      self.verbose = old_verbose
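+
+
+# A minimal sketch (hypothetical; not part of the original module) of feeding
+# captured child output through an Operation.  The operation name and the
+# sample output strings are illustrative assumptions.
+def _example_operation():  # pragma: no cover
+  oper = Operation('example-build')
+  # Progress lines of the form 'Pending N ... Total M' drive the progress bar.
+  oper.Output(sys.stdout, 'Pending 5 Total 10\r\n')
+  # Lines matching ERROR/FAILED/'!!!' are counted as errors by the filter.
+  oper.Output(sys.stderr, 'ERROR: sample failure\r\n')
+  if oper.WereErrorsDetected():
+    oper.Warning('errors were detected in the child output')
+  oper.FinishOutput()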
diff --git a/utils/frozen_chromite/lib/osutils.py b/utils/frozen_chromite/lib/osutils.py
new file mode 100644
index 0000000..dc5b1de
--- /dev/null
+++ b/utils/frozen_chromite/lib/osutils.py
@@ -0,0 +1,1414 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common file and os related utilities, including tempdir manipulation."""
+
+from __future__ import print_function
+
+import collections
+import contextlib
+import ctypes
+import ctypes.util
+import datetime
+import errno
+import glob
+import hashlib
+import os
+import pwd
+import re
+import shutil
+import stat
+import subprocess
+import tempfile
+
+import six
+
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+from autotest_lib.utils.frozen_chromite.utils import key_value_store
+
+
+# Env vars that tempdir can be gotten from; minimally, this
+# needs to match python's tempfile module and match normal
+# unix standards.
+_TEMPDIR_ENV_VARS = ('TMPDIR', 'TEMP', 'TMP')
+
+
+def GetNonRootUser():
+  """Returns a non-root user. Defaults to the current user.
+
+  If the current user is root, returns the username of the person who
+  ran the emerge command. If running using sudo, returns the username
+  of the person who ran the sudo command. If no non-root user is
+  found, returns None.
+  """
+  uid = os.getuid()
+  if uid == 0:
+    user = os.environ.get('PORTAGE_USERNAME', os.environ.get('SUDO_USER'))
+  else:
+    user = pwd.getpwuid(os.getuid()).pw_name
+
+  if user == 'root':
+    return None
+  else:
+    return user
+
+
+def IsChildProcess(pid, name=None):
+  """Return True if pid is a child of the current process.
+
+  Args:
+    pid: Child pid to search for in current process's pstree.
+    name: Name of the child process.
+
+  Note:
+    This function is not foolproof. If the process tree contains weird names,
+    an incorrect match might be possible.
+  """
+  cmd = ['pstree', '-Ap', str(os.getpid())]
+  pstree = cros_build_lib.run(cmd, capture_output=True, print_cmd=False,
+                              encoding='utf-8').stdout
+  if name is None:
+    match = '(%d)' % pid
+  else:
+    match = '-%s(%d)' % (name, pid)
+  return match in pstree
+
+
+def ExpandPath(path):
+  """Returns path after passing through realpath and expanduser."""
+  return os.path.realpath(os.path.expanduser(path))
+
+
+def IsSubPath(path, other):
+  """Returns whether |path| is a sub path of |other|."""
+  path = os.path.abspath(path)
+  other = os.path.abspath(other)
+  if path == other:
+    return True
+  return path.startswith(other + os.sep)
+
+
+def AllocateFile(path, size, makedirs=False):
+  """Allocates a file of a certain |size| in |path|.
+
+  Args:
+    path: Path to allocate the file.
+    size: The length, in bytes, of the desired file.
+    makedirs: If True, create missing leading directories in the path.
+  """
+  if makedirs:
+    SafeMakedirs(os.path.dirname(path))
+
+  with open(path, 'w') as out:
+    out.truncate(size)
+
+
+# All the modes that we allow people to pass to WriteFile.  This allows us to
+# make assumptions about the input so we can update it if needed.
+_VALID_WRITE_MODES = {
+    # Read & write, but no truncation, and file offset is 0.
+    'r+', 'r+b',
+    # Writing (and maybe reading) with truncation.
+    'w', 'wb', 'w+', 'w+b',
+    # Writing (and maybe reading), but no truncation, and file offset is at end.
+    'a', 'ab', 'a+', 'a+b',
+}
+
+
+def WriteFile(path, content, mode='w', encoding=None, errors=None, atomic=False,
+              makedirs=False, sudo=False):
+  """Write the given content to disk.
+
+  Args:
+    path: Pathway to write the content to.
+    content: Content to write.  May be either an iterable, or a string.
+    mode: The mode to use when opening the file.  'w' is for text files (see
+      the following settings) and 'wb' is for binary files.  If appending,
+      pass 'a', 'a+', etc...
+    encoding: The encoding of the file content.  Text files default to 'utf-8'.
+    errors: How to handle encoding errors.  Text files default to 'strict'.
+    atomic: If the updating of the file should be done atomically.  Note this
+            option is incompatible w/ append mode.
+    makedirs: If True, create missing leading directories in the path.
+    sudo: If True, write the file as root.
+  """
+  if mode not in _VALID_WRITE_MODES:
+    raise ValueError('mode must be one of {"%s"}, not %r' %
+                     ('", "'.join(sorted(_VALID_WRITE_MODES)), mode))
+
+  if sudo and atomic and ('a' in mode or '+' in mode):
+    raise ValueError('append mode does not work in sudo+atomic mode')
+
+  if 'b' in mode:
+    if encoding is not None or errors is not None:
+      raise ValueError('binary mode does not use encoding/errors')
+  else:
+    if encoding is None:
+      encoding = 'utf-8'
+    if errors is None:
+      errors = 'strict'
+
+  if makedirs:
+    SafeMakedirs(os.path.dirname(path), sudo=sudo)
+
+  # TODO(vapier): We can merge encoding/errors into the open call once we are
+  # Python 3 only.  Until then, we have to handle it ourselves.
+  if 'b' in mode:
+    write_wrapper = lambda x: x
+  else:
+    mode += 'b'
+    def write_wrapper(iterable):
+      for item in iterable:
+        yield item.encode(encoding, errors)
+
+  # If the file needs to be written as root and we are not root, write to a temp
+  # file, move it and change the permission.
+  if sudo and os.getuid() != 0:
+    if 'a' in mode or '+' in mode:
+      # Use dd to run through sudo & append the output, and write the new data
+      # to it through stdin.
+      cros_build_lib.sudo_run(
+          ['dd', 'conv=notrunc', 'oflag=append', 'status=none',
+           'of=%s' % (path,)], print_cmd=False, input=content)
+
+    else:
+      with tempfile.NamedTemporaryFile(mode=mode, delete=False) as temp:
+        write_path = temp.name
+        temp.writelines(write_wrapper(
+            cros_build_lib.iflatten_instance(content)))
+      os.chmod(write_path, 0o644)
+
+      try:
+        mv_target = path if not atomic else path + '.tmp'
+        cros_build_lib.sudo_run(['mv', write_path, mv_target],
+                                print_cmd=False, stderr=True)
+        Chown(mv_target, user='root', group='root')
+        if atomic:
+          cros_build_lib.sudo_run(['mv', mv_target, path],
+                                  print_cmd=False, stderr=True)
+
+      except cros_build_lib.RunCommandError:
+        SafeUnlink(write_path)
+        SafeUnlink(mv_target)
+        raise
+
+  else:
+    # We have the right permissions, simply write the file in python.
+    write_path = path
+    if atomic:
+      write_path = path + '.tmp'
+    with open(write_path, mode) as f:
+      f.writelines(write_wrapper(cros_build_lib.iflatten_instance(content)))
+
+    if not atomic:
+      return
+
+    try:
+      os.rename(write_path, path)
+    except EnvironmentError:
+      SafeUnlink(write_path)
+      raise
+
+
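+# A minimal sketch (hypothetical; not part of the original module): writing a
+# small file atomically and reading it back.  The path and contents are
+# illustrative assumptions.
+def _example_write_then_read():  # pragma: no cover
+  WriteFile('/tmp/example-settings.conf', 'key=value\n',
+            atomic=True, makedirs=True)
+  return ReadFile('/tmp/example-settings.conf')
+
+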
+def Touch(path, makedirs=False, mode=None):
+  """Simulate unix touch. Create if doesn't exist and update its timestamp.
+
+  Args:
+    path: a string, file name of the file to touch (creating if not present).
+    makedirs: If True, create missing leading directories in the path.
+    mode: The access permissions to set.  In the style of chmod.  Defaults to
+          using the umask.
+  """
+  if makedirs:
+    SafeMakedirs(os.path.dirname(path))
+
+  # Create the file if nonexistent.
+  open(path, 'a').close()
+  if mode is not None:
+    os.chmod(path, mode)
+  # Update timestamp to right now.
+  os.utime(path, None)
+
+
+def Chown(path, user=None, group=None, recursive=False):
+  """Simple sudo chown path to the user.
+
+  Defaults to user running command. Does nothing if run as root user unless
+  a new owner is provided.
+
+  Args:
+    path: str - File/directory to chown.
+    user: str|int|None - User to chown the file to. Defaults to current user.
+    group: str|int|None - Group to assign the file to.
+    recursive: Also chown child files/directories recursively.
+  """
+  if user is None:
+    user = GetNonRootUser() or ''
+  else:
+    user = str(user)
+
+  group = '' if group is None else str(group)
+
+  if user or group:
+    cmd = ['chown']
+    if recursive:
+      cmd += ['-R']
+    cmd += ['%s:%s' % (user, group), path]
+    cros_build_lib.sudo_run(cmd, print_cmd=False,
+                            stderr=True, stdout=True)
+
+
+def ReadFile(path, mode='r', encoding=None, errors=None):
+  """Read a given file on disk.  Primarily useful for one off small files.
+
+  The defaults are geared towards reading UTF-8 encoded text.
+
+  Args:
+    path: The file to read.
+    mode: The mode to use when opening the file.  'r' is for text files (see the
+      following settings) and 'rb' is for binary files.
+    encoding: The encoding of the file content.  Text files default to 'utf-8'.
+    errors: How to handle encoding errors.  Text files default to 'strict'.
+
+  Returns:
+    The content of the file, either as bytes or a string (with the specified
+    encoding).
+  """
+  if mode not in ('r', 'rb'):
+    raise ValueError('mode may only be "r" or "rb", not %r' % (mode,))
+
+  if 'b' in mode:
+    if encoding is not None or errors is not None:
+      raise ValueError('binary mode does not use encoding/errors')
+  else:
+    if encoding is None:
+      encoding = 'utf-8'
+    if errors is None:
+      errors = 'strict'
+
+  with open(path, 'rb') as f:
+    # TODO(vapier): We can merge encoding/errors into the open call once we are
+    # Python 3 only.  Until then, we have to handle it ourselves.
+    ret = f.read()
+    if 'b' not in mode:
+      ret = ret.decode(encoding, errors)
+    return ret
+
+
+def MD5HashFile(path):
+  """Calculate the md5 hash of a given file path.
+
+  Args:
+    path: The path of the file to hash.
+
+  Returns:
+    The hex digest of the md5 hash of the file.
+  """
+  contents = ReadFile(path, mode='rb')
+  return hashlib.md5(contents).hexdigest()
+
+
+def SafeSymlink(source, dest, sudo=False):
+  """Create a symlink at |dest| pointing to |source|.
+
+  This will overwrite |dest| if it already exists. This operation is not
+  atomic.
+
+  Args:
+    source: source path.
+    dest: destination path.
+    sudo: If True, create the link as root.
+  """
+  if sudo and os.getuid() != 0:
+    cros_build_lib.sudo_run(['ln', '-sfT', source, dest],
+                            print_cmd=False, stderr=True)
+  else:
+    SafeUnlink(dest)
+    os.symlink(source, dest)
+
+
+def SafeUnlink(path, sudo=False):
+  """Unlink a file from disk, ignoring if it doesn't exist.
+
+  Returns:
+    True if the file existed and was removed, False if it didn't exist.
+  """
+  try:
+    os.unlink(path)
+    return True
+  except EnvironmentError as e:
+    if e.errno == errno.ENOENT:
+      return False
+
+    if not sudo:
+      raise
+
+  # If we're still here, we're falling back to sudo.
+  cros_build_lib.sudo_run(['rm', '--', path], print_cmd=False, stderr=True)
+  return True
+
+
+def SafeMakedirs(path, mode=0o775, sudo=False, user='root'):
+  """Make parent directories if needed.  Ignore if existing.
+
+  Args:
+    path: The path to create.  Intermediate directories will be created as
+          needed. This can be either a |Path| or |str|.
+    mode: The access permissions in the style of chmod.
+    sudo: If True, create it via sudo, thus root owned.
+    user: If |sudo| is True, run sudo as |user|.
+
+  Returns:
+    True if the directory had to be created, False otherwise.
+
+  Raises:
+    EnvironmentError: If the makedir failed.
+    RunCommandError: If using run and the command failed for any reason.
+  """
+  if sudo and not (os.getuid() == 0 and user == 'root'):
+    if os.path.isdir(path):
+      return False
+    cros_build_lib.sudo_run(
+        ['mkdir', '-p', '--mode', '%o' % mode, str(path)], user=user,
+        print_cmd=False, stderr=True, stdout=True)
+    cros_build_lib.sudo_run(
+        ['chmod', '%o' % mode, str(path)],
+        print_cmd=False, stderr=True, stdout=True)
+    return True
+
+  try:
+    os.makedirs(path, mode)
+    # If we made the directory, force the mode.
+    os.chmod(path, mode)
+    return True
+  except EnvironmentError as e:
+    if e.errno != errno.EEXIST or not os.path.isdir(path):
+      raise
+
+  # If the mode on the directory does not match the request, try to fix it.
+  # It is the caller's responsibility to coordinate mode values if there is a
+  # need for that.
+  if stat.S_IMODE(os.stat(path).st_mode) != mode:
+    try:
+      os.chmod(path, mode)
+    except EnvironmentError:
+      # Just make sure it's a directory.
+      if not os.path.isdir(path):
+        raise
+  return False
+
+
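+# A minimal sketch (hypothetical; not part of the original module): creating a
+# scratch directory and clearing a stale lock file.  The paths are
+# illustrative assumptions; sudo is left off so everything stays user-owned.
+def _example_prepare_scratch_dir():  # pragma: no cover
+  SafeMakedirs('/tmp/example-scratch/cache', mode=0o775)
+  SafeUnlink('/tmp/example-scratch/cache/stale.lock')
+
+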
+class MakingDirsAsRoot(Exception):
+  """Raised when creating directories as root."""
+
+
+def SafeMakedirsNonRoot(path, mode=0o775, user=None):
+  """Create directories and make sure they are not owned by root.
+
+  See SafeMakedirs for the arguments and returns.
+  """
+  if user is None:
+    user = GetNonRootUser()
+
+  if user is None or user == 'root':
+    raise MakingDirsAsRoot('Refusing to create %s as user %s!' % (path, user))
+
+  created = False
+  should_chown = False
+  try:
+    created = SafeMakedirs(path, mode=mode, user=user)
+    if not created:
+      # Sometimes, the directory exists, but is owned by root. As a HACK, we
+      # will chown it to the requested user.
+      stat_info = os.stat(path)
+      should_chown = (stat_info.st_uid == 0)
+  except OSError as e:
+    if e.errno == errno.EACCES:
+      # Sometimes, (a prefix of the) path we're making the directory in may be
+      # owned by root, and so we fail. As a HACK, use sudo to create the
+      # directory and then chown it.
+      created = should_chown = SafeMakedirs(path, mode=mode, sudo=True)
+
+  if should_chown:
+    Chown(path, user=user)
+
+  return created
+
+
+class BadPathsException(Exception):
+  """Raised by various osutils path manipulation functions on bad input."""
+
+
+def CopyDirContents(from_dir, to_dir, symlinks=False, allow_nonempty=False):
+  """Copy contents of from_dir to to_dir. Both should exist.
+
+  shutil.copytree allows one to copy a rooted directory tree along with the
+  containing directory. OTOH, this function copies the contents of from_dir to
+  an existing directory. For example, for the given paths:
+
+  from/
+    inside/x.py
+    y.py
+  to/
+
+  shutil.copytree('from', 'to')
+  # Raises because 'to' already exists.
+
+  shutil.copytree('from', 'to/non_existent_dir')
+  to/non_existent_dir/
+    inside/x.py
+    y.py
+
+  CopyDirContents('from', 'to')
+  to/
+    inside/x.py
+    y.py
+
+  Args:
+    from_dir: The directory whose contents should be copied. Must exist. Either
+      a |Path| or a |str|.
+    to_dir: The directory to which contents should be copied. Must exist.
+      Either a |Path| or a |str|.
+    symlinks: Whether symlinks should be copied or dereferenced. When True, all
+        symlinks will be copied as symlinks into the destination. When False,
+        the symlinks will be dereferenced and the contents copied over.
+    allow_nonempty: If True, do not die when to_dir is nonempty.
+
+  Raises:
+    BadPathsException: if the source / target directories don't exist, or if
+        target directory is non-empty when allow_nonempty=False.
+    OSError: on esoteric permission errors.
+  """
+  if not os.path.isdir(from_dir):
+    raise BadPathsException('Source directory %s does not exist.' % from_dir)
+  if not os.path.isdir(to_dir):
+    raise BadPathsException('Destination directory %s does not exist.' % to_dir)
+  if os.listdir(to_dir) and not allow_nonempty:
+    raise BadPathsException('Destination directory %s is not empty.' % to_dir)
+
+  for name in os.listdir(from_dir):
+    from_path = os.path.join(from_dir, name)
+    to_path = os.path.join(to_dir, name)
+    if symlinks and os.path.islink(from_path):
+      os.symlink(os.readlink(from_path), to_path)
+    elif os.path.isdir(from_path):
+      shutil.copytree(from_path, to_path, symlinks=symlinks)
+    elif os.path.isfile(from_path):
+      shutil.copy2(from_path, to_path)
+
+
+def RmDir(path, ignore_missing=False, sudo=False):
+  """Recursively remove a directory.
+
+  Args:
+    path: Path of directory to remove. Either a |Path| or |str|.
+    ignore_missing: Do not error when path does not exist.
+    sudo: Remove directories as root.
+  """
+  # Using `sudo` is a bit expensive, so try to delete everything natively first.
+  try:
+    shutil.rmtree(path)
+    return
+  except EnvironmentError as e:
+    if ignore_missing and e.errno == errno.ENOENT:
+      return
+
+    if not sudo:
+      raise
+
+  # If we're still here, we're falling back to sudo.
+  try:
+    cros_build_lib.sudo_run(
+        ['rm', '-r%s' % ('f' if ignore_missing else '',), '--', str(path)],
+        debug_level=logging.DEBUG, stdout=True, stderr=True)
+  except cros_build_lib.RunCommandError:
+    if not ignore_missing or os.path.exists(path):
+      # If we're not ignoring the rm ENOENT equivalent, throw it;
+      # if the pathway still exists, something failed, thus throw it.
+      raise
+
+
+class EmptyDirNonExistentException(BadPathsException):
+  """EmptyDir was called on a non-existent directory without ignore_missing."""
+
+
+def EmptyDir(path, ignore_missing=False, sudo=False, exclude=()):
+  """Remove all files inside a directory, including subdirs.
+
+  Args:
+    path: Path of directory to empty.
+    ignore_missing: Do not error when path does not exist.
+    sudo: Remove directories as root.
+    exclude: Iterable of file names to exclude from the cleanup. They should
+             exactly match the file or directory name in path.
+             e.g. ['foo', 'bar']
+
+  Raises:
+    EmptyDirNonExistentException: if ignore_missing is False and dir is missing.
+    OSError: If the directory is not user writable.
+  """
+  path = ExpandPath(path)
+  exclude = set(exclude)
+
+  if not os.path.exists(path):
+    if ignore_missing:
+      return
+    raise EmptyDirNonExistentException(
+        'EmptyDir called non-existent: %s' % path)
+
+  # We don't catch OSError if path is not a directory.
+  for candidate in os.listdir(path):
+    if candidate not in exclude:
+      subpath = os.path.join(path, candidate)
+      # Both options can throw OSError if there is a permission problem.
+      if os.path.isdir(subpath):
+        RmDir(subpath, ignore_missing=ignore_missing, sudo=sudo)
+      else:
+        SafeUnlink(subpath, sudo)
+
+
+def Which(binary, path=None, mode=os.X_OK, root=None):
+  """Return the absolute path to the specified binary.
+
+  Args:
+    binary: The binary to look for.
+    path: Search path. Defaults to os.environ['PATH'].
+    mode: File mode to check on the binary.
+    root: Path to automatically prefix to every element of |path|.
+
+  Returns:
+    The full path to |binary| if found (with the right mode). Otherwise, None.
+  """
+  if path is None:
+    path = os.environ.get('PATH', '')
+  for p in path.split(os.pathsep):
+    if root and p.startswith('/'):
+      # Don't prefix relative paths.  We might want to support this at some
+      # point, but it's not worth the coding hassle currently.
+      p = os.path.join(root, p.lstrip('/'))
+    p = os.path.join(p, binary)
+    if os.path.isfile(p) and os.access(p, mode):
+      return p
+  return None
+
+
+def FindMissingBinaries(needed_tools):
+  """Verifies that the required tools are present on the system.
+
+  This is especially important for scripts that are intended to run
+  outside the chroot.
+
+  Args:
+    needed_tools: a list of binary names (strings) to look for.
+
+  Returns:
+    If all tools are found, returns the empty list. Otherwise, returns the
+    list of missing tools.
+  """
+  return [binary for binary in needed_tools if Which(binary) is None]
+
+
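+# A minimal sketch (hypothetical; not part of the original module): verifying
+# host tools before shelling out.  The tool names are illustrative assumptions.
+def _example_require_tools():  # pragma: no cover
+  missing = FindMissingBinaries(['rsync', 'ssh'])
+  if missing:
+    raise RuntimeError('missing required tools: %s' % ', '.join(missing))
+  return Which('rsync')
+
+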
+def DirectoryIterator(base_path):
+  """Iterates through the files and subdirs of a directory."""
+  for root, dirs, files in os.walk(base_path):
+    for e in [d + os.sep for d in dirs] + files:
+      yield os.path.join(root, e)
+
+
+def IteratePaths(end_path):
+  """Generator that iterates down to |end_path| from root /.
+
+  Args:
+    end_path: The destination. If this is a relative path, it will be resolved
+        to absolute path. In all cases, it will be normalized.
+
+  Yields:
+    All the paths gradually constructed from / to |end_path|. For example:
+    IteratePaths("/this/path") yields "/", "/this", and "/this/path".
+  """
+  return reversed(list(IteratePathParents(end_path)))
+
+
+def IteratePathParents(start_path):
+  """Generator that iterates through a directory's parents.
+
+  Args:
+    start_path: The path to start from.
+
+  Yields:
+    The passed-in path, along with its parents.  i.e.,
+    IteratePathParents('/usr/local') would yield '/usr/local', '/usr', and '/'.
+  """
+  path = os.path.abspath(start_path)
+  # There's a bug that abspath('//') returns '//'. We need to renormalize it.
+  if path == '//':
+    path = '/'
+  yield path
+  while path.strip('/'):
+    path = os.path.dirname(path)
+    yield path
+
+
+def FindInPathParents(path_to_find, start_path, test_func=None, end_path=None):
+  """Look for a relative path, ascending through parent directories.
+
+  Ascend through parent directories of current path looking for a relative
+  path.  I.e., given a directory structure like:
+  -/
+   |
+   --usr
+     |
+     --bin
+     |
+     --local
+       |
+       --google
+
+  the call FindInPathParents('bin', '/usr/local') would return '/usr/bin', and
+  the call FindInPathParents('google', '/usr/local') would return
+  '/usr/local/google'.
+
+  Args:
+    path_to_find: The relative path to look for.
+    start_path: The path to start the search from.  If |start_path| is a
+      directory, it will be included in the directories that are searched.
+    test_func: The function to use to verify the relative path.  Defaults to
+      os.path.exists.  The function will be passed one argument - the target
+      path to test.  A True return value will cause FindInPathParents to
+      return the target.
+    end_path: The path to stop searching.
+  """
+  if end_path is not None:
+    end_path = os.path.abspath(end_path)
+  if test_func is None:
+    test_func = os.path.exists
+  for path in IteratePathParents(start_path):
+    if path == end_path:
+      return None
+    target = os.path.join(path, path_to_find)
+    if test_func(target):
+      return target
+  return None
+
+
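+# A minimal sketch (hypothetical; not part of the original module): walking up
+# from the current directory to find a checkout marker, in the style of the
+# '.repo' lookup mentioned elsewhere.  The marker name is an assumption.
+def _example_find_repo_root():  # pragma: no cover
+  marker = FindInPathParents('.repo', os.getcwd(), test_func=os.path.isdir)
+  return os.path.dirname(marker) if marker else None
+
+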
+def SetGlobalTempDir(tempdir_value, tempdir_env=None):
+  """Set the global temp directory to the specified |tempdir_value|
+
+  Args:
+    tempdir_value: The new location for the global temp directory.
+    tempdir_env: Optional. A list of key/value pairs to set in the
+      environment. If not provided, set all global tempdir environment
+      variables to point at |tempdir_value|.
+
+  Returns:
+    Returns (old_tempdir_value, old_tempdir_env).
+
+    old_tempdir_value: The old value of the global temp directory.
+    old_tempdir_env: A list of the key/value pairs that control the tempdir
+      environment and were set prior to this function. If the environment
+      variable was not set, it is recorded as None.
+  """
+  # pylint: disable=protected-access
+  with tempfile._once_lock:
+    old_tempdir_value = GetGlobalTempDir()
+    old_tempdir_env = tuple((x, os.environ.get(x)) for x in _TEMPDIR_ENV_VARS)
+
+    # Now update TMPDIR/TEMP/TMP, and poke the python
+    # internals to ensure all subprocess/raw tempfile
+    # access goes into this location.
+    if tempdir_env is None:
+      os.environ.update((x, tempdir_value) for x in _TEMPDIR_ENV_VARS)
+    else:
+      for key, value in tempdir_env:
+        if value is None:
+          os.environ.pop(key, None)
+        else:
+          os.environ[key] = value
+
+    # Finally, adjust python's cached value (we know it's cached by here
+    # since we invoked _get_default_tempdir from above).  Note this
+    # is necessary since we want *all* output from that point
+    # forward to go to this location.
+    tempfile.tempdir = tempdir_value
+
+  return (old_tempdir_value, old_tempdir_env)
+
+
+def GetGlobalTempDir():
+  """Get the path to the current global tempdir.
+
+  The global tempdir path can be modified through calls to SetGlobalTempDir.
+  """
+  # pylint: disable=protected-access
+  return tempfile._get_default_tempdir()
+
+
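+# A minimal sketch (hypothetical; not part of the original module): pointing
+# the global tempdir somewhere else for a scope and restoring it afterwards.
+# The new directory is an illustrative assumption and must already exist.
+def _example_scoped_tempdir(new_dir):  # pragma: no cover
+  old_value, old_env = SetGlobalTempDir(new_dir)
+  try:
+    return GetGlobalTempDir()
+  finally:
+    SetGlobalTempDir(old_value, old_env)
+
+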
+def _TempDirSetup(self, prefix='tmp', set_global=False, base_dir=None):
+  """Generate a tempdir, modifying the object, and env to use it.
+
+  Specifically, if set_global is True, then from this invocation forward,
+  python and all subprocesses will use this location for their tempdir.
+
+  The matching _TempDirTearDown restores the env to what it was.
+  """
+  # Stash the old tempdir that was used so we can
+  # switch it back on the way out.
+  self.tempdir = tempfile.mkdtemp(prefix=prefix, dir=base_dir)
+  os.chmod(self.tempdir, 0o700)
+
+  if set_global:
+    self._orig_tempdir_value, self._orig_tempdir_env = \
+        SetGlobalTempDir(self.tempdir)
+
+
+def _TempDirTearDown(self, force_sudo, delete=True):
+  # Note that _TempDirSetup may have failed, resulting in these attributes
+  # not being set; this is why we use getattr here (and must).
+  tempdir = getattr(self, 'tempdir', None)
+  try:
+    if tempdir is not None and delete:
+      RmDir(tempdir, ignore_missing=True, sudo=force_sudo)
+  except EnvironmentError as e:
+    # Suppress ENOENT since we may be invoked
+    # in a context where parallel wipes of the tempdir
+    # may be occurring; primarily during hard shutdowns.
+    if e.errno != errno.ENOENT:
+      raise
+
+  # Restore environment modification if necessary.
+  orig_tempdir_value = getattr(self, '_orig_tempdir_value', None)
+  if orig_tempdir_value is not None:
+    # pylint: disable=protected-access
+    SetGlobalTempDir(orig_tempdir_value, self._orig_tempdir_env)
+
+
+class TempDir(object):
+  """Object that creates a temporary directory.
+
+  This object can either be used as a context manager or just as a simple
+  object. The temporary directory is stored as self.tempdir in the object, and
+  is returned as a string by a 'with' statement.
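+
+  Examples:
+    # Illustrative usage; the prefix is arbitrary.
+    with TempDir(prefix='mytask') as tempdir:
+      ...  # use the scratch directory at |tempdir|
+    # The directory is removed on exit unless delete=False was passed.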
+  """
+
+  def __init__(self, **kwargs):
+    """Constructor. Creates the temporary directory.
+
+    Args:
+      prefix: See tempfile.mkdtemp documentation.
+      base_dir: The directory to place the temporary directory.
+      set_global: Set this directory as the global temporary directory.
+      delete: Whether the temporary dir should be deleted as part of cleanup.
+          (default: True)
+      sudo_rm: Whether the temporary dir will need root privileges to remove.
+          (default: False)
+    """
+    self.kwargs = kwargs.copy()
+    self.delete = kwargs.pop('delete', True)
+    self.sudo_rm = kwargs.pop('sudo_rm', False)
+    self.tempdir = None
+    _TempDirSetup(self, **kwargs)
+
+  def SetSudoRm(self, enable=True):
+    """Sets |sudo_rm|, which forces us to delete temporary files as root."""
+    self.sudo_rm = enable
+
+  def Cleanup(self):
+    """Clean up the temporary directory."""
+    if self.tempdir is not None:
+      try:
+        _TempDirTearDown(self, self.sudo_rm, delete=self.delete)
+      finally:
+        self.tempdir = None
+
+  def __enter__(self):
+    """Return the temporary directory."""
+    return self.tempdir
+
+  def __exit__(self, exc_type, exc_value, exc_traceback):
+    try:
+      self.Cleanup()
+    except Exception:
+      if exc_type:
+        # If an exception from inside the context was already in progress,
+        # log our cleanup exception, then allow the original to resume.
+        logging.error('While exiting %s:', self, exc_info=True)
+
+        if self.tempdir:
+          # Log all files in tempdir at the time of the failure.
+          try:
+            logging.error('Directory contents were:')
+            for name in os.listdir(self.tempdir):
+              logging.error('  %s', name)
+          except OSError:
+            logging.error('  Directory did not exist.')
+
+          # Log all mounts at the time of the failure, since that's the most
+          # common cause.
+          mount_results = cros_build_lib.run(
+              ['mount'], stdout=True, stderr=subprocess.STDOUT,
+              check=False)
+          logging.error('Mounts were:')
+          logging.error('  %s', mount_results.output)
+
+      else:
+        # If there was not an exception from the context, raise ours.
+        raise
+
+  def __del__(self):
+    self.Cleanup()
+
+  def __str__(self):
+    return self.tempdir if self.tempdir else ''
+
+
+def TempDirDecorator(func):
+  """Populates self.tempdir with path to a temporary writeable directory."""
+  def f(self, *args, **kwargs):
+    with TempDir() as tempdir:
+      self.tempdir = tempdir
+      return func(self, *args, **kwargs)
+
+  f.__name__ = func.__name__
+  f.__doc__ = func.__doc__
+  f.__module__ = func.__module__
+  return f
+
+
+def TempFileDecorator(func):
+  """Populates self.tempfile with path to a temporary writeable file"""
+  def f(self, *args, **kwargs):
+    with tempfile.NamedTemporaryFile(dir=self.tempdir, delete=False) as f:
+      self.tempfile = f.name
+    return func(self, *args, **kwargs)
+
+  f.__name__ = func.__name__
+  f.__doc__ = func.__doc__
+  f.__module__ = func.__module__
+  return TempDirDecorator(f)
+
+
+# Flags synced from sys/mount.h.  See mount(2) for details.
+MS_RDONLY = 1
+MS_NOSUID = 2
+MS_NODEV = 4
+MS_NOEXEC = 8
+MS_SYNCHRONOUS = 16
+MS_REMOUNT = 32
+MS_MANDLOCK = 64
+MS_DIRSYNC = 128
+MS_NOATIME = 1024
+MS_NODIRATIME = 2048
+MS_BIND = 4096
+MS_MOVE = 8192
+MS_REC = 16384
+MS_SILENT = 32768
+MS_POSIXACL = 1 << 16
+MS_UNBINDABLE = 1 << 17
+MS_PRIVATE = 1 << 18
+MS_SLAVE = 1 << 19
+MS_SHARED = 1 << 20
+MS_RELATIME = 1 << 21
+MS_KERNMOUNT = 1 << 22
+MS_I_VERSION = 1 << 23
+MS_STRICTATIME = 1 << 24
+MS_ACTIVE = 1 << 30
+MS_NOUSER = 1 << 31
+
+
+def Mount(source, target, fstype, flags, data=''):
+  """Call the mount(2) func; see the man page for details."""
+  libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
+  # These fields might be a string or 0 (for NULL).  Convert to bytes.
+  def _MaybeEncode(s):
+    return s.encode('utf-8') if isinstance(s, six.string_types) else s
+  if libc.mount(_MaybeEncode(source), _MaybeEncode(target),
+                _MaybeEncode(fstype), ctypes.c_int(flags),
+                _MaybeEncode(data)) != 0:
+    e = ctypes.get_errno()
+    raise OSError(e, os.strerror(e))
+
+
+def MountDir(src_path, dst_path, fs_type=None, sudo=True, makedirs=True,
+             mount_opts=('nodev', 'noexec', 'nosuid'), skip_mtab=False,
+             **kwargs):
+  """Mount |src_path| at |dst_path|
+
+  Args:
+    src_path: Source of the new mount.
+    dst_path: Where to mount things.
+    fs_type: Specify the filesystem type to use.  Defaults to autodetect.
+    sudo: Run through sudo.
+    makedirs: Create |dst_path| if it doesn't exist.
+    mount_opts: List of options to pass to `mount`.
+    skip_mtab: Whether to write new entries to /etc/mtab.
+    kwargs: Pass all other args to run.
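+
+  Examples:
+    # Illustrative sketches; device and mount point paths are hypothetical,
+    # and root (sudo) is required by default.
+    MountDir('/dev/sdb1', '/mnt/usb', fs_type='ext4')
+    MountDir('/some/dir', '/some/bind/point', mount_opts=('bind',))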
+  """
+  if sudo:
+    runcmd = cros_build_lib.sudo_run
+  else:
+    runcmd = cros_build_lib.run
+
+  if makedirs:
+    SafeMakedirs(dst_path, sudo=sudo)
+
+  cmd = ['mount', src_path, dst_path]
+  if skip_mtab:
+    cmd += ['-n']
+  if fs_type:
+    cmd += ['-t', fs_type]
+  if mount_opts:
+    cmd += ['-o', ','.join(mount_opts)]
+  runcmd(cmd, **kwargs)
+
+
+def MountTmpfsDir(path, name='osutils.tmpfs', size='5G',
+                  mount_opts=('nodev', 'noexec', 'nosuid'), **kwargs):
+  """Mount a tmpfs at |path|
+
+  Args:
+    path: Directory to mount the tmpfs.
+    name: Friendly name to include in mount output.
+    size: Size of the temp fs.
+    mount_opts: List of options to pass to `mount`.
+    kwargs: Pass all other args to MountDir.
+  """
+  mount_opts = list(mount_opts) + ['size=%s' % size]
+  MountDir(name, path, fs_type='tmpfs', mount_opts=mount_opts, **kwargs)
+
+
+def UmountDir(path, lazy=True, sudo=True, cleanup=True):
+  """Unmount a previously mounted temp fs mount.
+
+  Args:
+    path: Directory to unmount.
+    lazy: Whether to do a lazy unmount.
+    sudo: Run through sudo.
+    cleanup: Whether to delete the |path| after unmounting.
+             Note: Does not work when |lazy| is set.
+  """
+  if sudo:
+    runcmd = cros_build_lib.sudo_run
+  else:
+    runcmd = cros_build_lib.run
+
+  cmd = ['umount', '-d', path]
+  if lazy:
+    cmd += ['-l']
+  runcmd(cmd, debug_level=logging.DEBUG)
+
+  if cleanup:
+    # We will randomly get EBUSY here even when the umount worked.  Suspect
+    # this is due to the host distro doing stupid crap on us like autoscanning
+    # directories when they get mounted.
+    def _retry(e):
+      # When we're using `rm` (which is required for sudo), we can't cleanly
+      # detect the aforementioned failure.  This is because `rm` will see the
+      # errno, handle itself, and then do exit(1).  Which means all we see is
+      # that rm failed.  Assume it's this issue as -rf will ignore most things.
+      if isinstance(e, cros_build_lib.RunCommandError):
+        return True
+      elif isinstance(e, OSError):
+        # When we aren't using sudo, we do the unlink ourselves, so the exact
+        # errno is bubbled up to us and we can detect it specifically without
+        # potentially ignoring all other possible failures.
+        return e.errno == errno.EBUSY
+      else:
+        # Something else, we don't know so do not retry.
+        return False
+    retry_util.GenericRetry(_retry, 60, RmDir, path, sudo=sudo, sleep=1)
+
+
+def UmountTree(path):
+  """Unmounts |path| and any submounts under it."""
+  # Scrape it from /proc/mounts since it's easily accessible;
+  # additionally, unmount in reverse order of what's listed there
+  # rather than trying a reverse sorting; it's possible for
+  # mount /z /foon
+  # mount /foon/blah -o loop /a
+  # which reverse sorting cannot handle.
+  path = os.path.realpath(path).rstrip('/') + '/'
+  mounts = [mtab.destination for mtab in IterateMountPoints() if
+            mtab.destination.startswith(path) or
+            mtab.destination == path.rstrip('/')]
+
+  for mount_pt in reversed(mounts):
+    UmountDir(mount_pt, lazy=False, cleanup=False)
+
+
+def SetEnvironment(env):
+  """Restore the environment variables to that of passed in dictionary."""
+  os.environ.clear()
+  os.environ.update(env)
+
+
+def SourceEnvironment(script, whitelist, ifs=',', env=None, multiline=False):
+  """Returns the environment exported by a shell script.
+
+  Note that the script is actually executed (sourced), so do not use this on
+  files that have side effects (such as modify the file system).  Stdout will
+  be sent to /dev/null, so just echoing is OK.
+
+  Args:
+    script: The shell script to 'source'.
+    whitelist: An iterable of environment variables to retrieve values for.
+    ifs: When showing arrays, what separator to use.
+    env: A dict of the initial env to pass down.  You can also pass it None
+         (to clear the env) or True (to preserve the current env).
+    multiline: Allow a variable to span multiple lines.
+
+  Returns:
+    A dictionary containing the values of the whitelisted environment
+    variables that are set.
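+
+  Examples:
+    # Illustrative sketch; the script path and variable names are
+    # hypothetical.
+    env = SourceEnvironment('/etc/my_config.sh', ('BOARD', 'VERSION'))
+    board = env.get('BOARD')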
+  """
+  dump_script = ['source "%s" >/dev/null' % script,
+                 'IFS="%s"' % ifs]
+  for var in whitelist:
+    # Note: If we want to get more exact results out of bash, we should switch
+    # to using `declare -p "${var}"`.  It would require writing a custom parser
+    # here, but it would be more robust.
+    dump_script.append(
+        '[[ "${%(var)s+set}" == "set" ]] && echo "%(var)s=\\"${%(var)s[*]}\\""'
+        % {'var': var})
+  dump_script.append('exit 0')
+
+  if env is None:
+    env = {}
+  elif env is True:
+    env = None
+  output = cros_build_lib.run(['bash'], env=env, capture_output=True,
+                              print_cmd=False, encoding='utf-8',
+                              input='\n'.join(dump_script)).output
+  return key_value_store.LoadData(output, multiline=multiline)
+
+
+def ListBlockDevices(device_path=None, in_bytes=False):
+  """Lists all block devices.
+
+  Args:
+    device_path: device path (e.g. /dev/sdc).
+    in_bytes: whether to display size in bytes.
+
+  Returns:
+    A list of BlockDevice items with attributes 'NAME', 'RM', 'TYPE',
+    'SIZE' (RM stands for removable).
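+
+  Examples:
+    # Illustrative sketch; the devices reported depend on the host.
+    for dev in ListBlockDevices():
+      if dev.TYPE == 'disk' and dev.RM == '1':
+        print('Removable disk %s (%s)' % (dev.NAME, dev.SIZE))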
+  """
+  keys = ['NAME', 'RM', 'TYPE', 'SIZE']
+  BlockDevice = collections.namedtuple('BlockDevice', keys)
+
+  cmd = ['lsblk', '--pairs']
+  if in_bytes:
+    cmd.append('--bytes')
+
+  if device_path:
+    cmd.append(device_path)
+
+  cmd += ['--output', ','.join(keys)]
+  result = cros_build_lib.dbg_run(cmd, capture_output=True, encoding='utf-8')
+  devices = []
+  for line in result.stdout.strip().splitlines():
+    d = {}
+    for k, v in re.findall(r'(\S+?)=\"(.+?)\"', line):
+      d[k] = v
+
+    devices.append(BlockDevice(**d))
+
+  return devices
+
+
+def GetDeviceInfo(device, keyword='model'):
+  """Get information of |device| by searching through device path.
+
+    Looks for the file named |keyword| in the path upwards from
+    /sys/block/|device|/device. This path is a symlink and will be fully
+    expanded when searching.
+
+  Args:
+    device: Device name (e.g. 'sdc').
+    keyword: The filename to look for (e.g. product, model).
+
+  Returns:
+    The content of the |keyword| file.
+  """
+  device_path = os.path.join('/sys', 'block', device)
+  if not os.path.isdir(device_path):
+    raise ValueError('%s is not a valid device path.' % device_path)
+
+  path_list = ExpandPath(os.path.join(device_path, 'device')).split(os.path.sep)
+  while len(path_list) > 2:
+    target = os.path.join(os.path.sep.join(path_list), keyword)
+    if os.path.isfile(target):
+      return ReadFile(target).strip()
+
+    path_list = path_list[:-1]
+
+
+def GetDeviceSize(device_path, in_bytes=False):
+  """Returns the size of |device|.
+
+  Args:
+    device_path: Device path (e.g. '/dev/sdc').
+    in_bytes: If set True, returns the size in bytes.
+
+  Returns:
+    Size of the device in human readable format unless |in_bytes| is set.
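+
+  Examples:
+    # Illustrative sketch; '/dev/sdc' is a hypothetical device path.
+    GetDeviceSize('/dev/sdc')                  # e.g. '14.9G'
+    GetDeviceSize('/dev/sdc', in_bytes=True)   # an int number of bytes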
+  """
+  devices = ListBlockDevices(device_path=device_path, in_bytes=in_bytes)
+  for d in devices:
+    if d.TYPE == 'disk':
+      return int(d.SIZE) if in_bytes else d.SIZE
+
+  raise ValueError('No size info of %s is found.' % device_path)
+
+
+FileInfo = collections.namedtuple(
+    'FileInfo', ['path', 'owner', 'size', 'atime', 'mtime'])
+
+
+def StatFilesInDirectory(path, recursive=False, to_string=False):
+  """Stat files in the directory |path|.
+
+  Args:
+    path: Path to the target directory.
+    recursive: Whether to recursively list all files in |path|.
+    to_string: Whether to return a string containing the metadata of the
+      files.
+
+  Returns:
+    If |to_string| is False, returns a list of FileInfo objects. Otherwise,
+    returns a string of metadata of the files.
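+
+  Examples:
+    # Illustrative sketch; the directory path is hypothetical.
+    logging.debug(StatFilesInDirectory('/tmp/results', to_string=True))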
+  """
+  path = ExpandPath(path)
+  def ToFileInfo(path, stat_val):
+    return FileInfo(path,
+                    pwd.getpwuid(stat_val.st_uid)[0],
+                    stat_val.st_size,
+                    datetime.datetime.fromtimestamp(stat_val.st_atime),
+                    datetime.datetime.fromtimestamp(stat_val.st_mtime))
+
+  file_infos = []
+  for root, dirs, files in os.walk(path, topdown=True):
+    for filename in dirs + files:
+      filepath = os.path.join(root, filename)
+      file_infos.append(ToFileInfo(filepath, os.lstat(filepath)))
+
+    if not recursive:
+      # Process only the top-most directory.
+      break
+
+  if not to_string:
+    return file_infos
+
+  msg = 'Listing the content of %s' % path
+  msg_format = ('Path: {x.path}, Owner: {x.owner}, Size: {x.size} bytes, '
+                'Accessed: {x.atime}, Modified: {x.mtime}')
+  msg = '%s\n%s' % (msg,
+                    '\n'.join([msg_format.format(x=x) for x in file_infos]))
+  return msg
+
+
+@contextlib.contextmanager
+def ChdirContext(target_dir):
+  """A context manager to chdir() into |target_dir| and back out on exit.
+
+  Args:
+    target_dir: A target directory to chdir into.
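+
+  Examples:
+    # Illustrative; '/tmp' stands in for any existing directory.
+    with ChdirContext('/tmp'):
+      ...  # os.getcwd() is now '/tmp'
+    # Back in the original working directory here.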
+  """
+
+  cwd = os.getcwd()
+  os.chdir(target_dir)
+  try:
+    yield
+  finally:
+    os.chdir(cwd)
+
+
+def _SameFileSystem(path1, path2):
+  """Determine whether two paths are on the same filesystem.
+
+  Be resilient to nonsense paths. Return False instead of blowing up.
+  """
+  try:
+    return os.stat(path1).st_dev == os.stat(path2).st_dev
+  except OSError:
+    return False
+
+
+class MountOverlayContext(object):
+  """A context manager for mounting an OverlayFS directory.
+
+  An overlay filesystem will be mounted at |mount_dir|, and will be unmounted
+  when the context exits.
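+
+  Examples:
+    # Illustrative sketch; the three directories are hypothetical, must exist
+    # beforehand, and mounting requires root.
+    with MountOverlayContext('/ro/lower', '/rw/upper', '/mnt/merged'):
+      ...  # /mnt/merged shows lower_dir with upper_dir's changes on top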
+  """
+
+  OVERLAY_FS_MOUNT_ERRORS = (32,)
+  def __init__(self, lower_dir, upper_dir, mount_dir, cleanup=False):
+    """Initialize.
+
+    Args:
+      lower_dir: The lower directory (read-only).
+      upper_dir: The upper directory (read-write).
+      mount_dir: The mount point for the merged overlay.
+      cleanup: Whether to remove the mount point after unmounting. This uses an
+          internal retry logic for cases where unmount is successful but the
+          directory still appears busy, and is generally more resilient than
+          removing it independently.
+    """
+    self._lower_dir = lower_dir
+    self._upper_dir = upper_dir
+    self._mount_dir = mount_dir
+    self._cleanup = cleanup
+    self.tempdir = None
+
+  def __enter__(self):
+    # Upstream Kernel 3.18 and the ubuntu backport of overlayfs have different
+    # APIs. We must support both.
+    try_legacy = False
+    stashed_e_overlay_str = None
+
+    # We must ensure that upperdir and workdir are on the same filesystem.
+    if _SameFileSystem(self._upper_dir, GetGlobalTempDir()):
+      _TempDirSetup(self)
+    elif _SameFileSystem(self._upper_dir, os.path.dirname(self._upper_dir)):
+      _TempDirSetup(self, base_dir=os.path.dirname(self._upper_dir))
+    else:
+      logging.debug('Could not create a workdir on the same filesystem as %s. '
+                    'Trying legacy API instead.',
+                    self._upper_dir)
+      try_legacy = True
+
+    if not try_legacy:
+      try:
+        MountDir('overlay', self._mount_dir, fs_type='overlay', makedirs=False,
+                 mount_opts=('lowerdir=%s' % self._lower_dir,
+                             'upperdir=%s' % self._upper_dir,
+                             'workdir=%s' % self.tempdir),
+                 quiet=True)
+      except cros_build_lib.RunCommandError as e_overlay:
+        if e_overlay.result.returncode not in self.OVERLAY_FS_MOUNT_ERRORS:
+          raise
+        logging.debug('Failed to mount overlay filesystem. Trying legacy API.')
+        stashed_e_overlay_str = str(e_overlay)
+        try_legacy = True
+
+    if try_legacy:
+      try:
+        MountDir('overlayfs', self._mount_dir, fs_type='overlayfs',
+                 makedirs=False,
+                 mount_opts=('lowerdir=%s' % self._lower_dir,
+                             'upperdir=%s' % self._upper_dir),
+                 quiet=True)
+      except cros_build_lib.RunCommandError as e_overlayfs:
+        logging.error('All attempts at mounting overlay filesystem failed.')
+        if stashed_e_overlay_str is not None:
+          logging.error('overlay: %s', stashed_e_overlay_str)
+        logging.error('overlayfs: %s', str(e_overlayfs))
+        raise
+
+    return self
+
+  def __exit__(self, exc_type, exc_value, traceback):
+    UmountDir(self._mount_dir, cleanup=self._cleanup)
+    _TempDirTearDown(self, force_sudo=True)
+
+
+MountInfo = collections.namedtuple(
+    'MountInfo',
+    'source destination filesystem options')
+
+
+def IterateMountPoints(proc_file='/proc/mounts'):
+  """Iterate over all mounts as reported by "/proc/mounts".
+
+  Args:
+    proc_file: A path to a file whose content is similar to /proc/mounts.
+      Default to "/proc/mounts" itself.
+
+  Returns:
+    A generator that yields MountInfo objects.
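+
+  Examples:
+    # Illustrative sketch: list tmpfs mounts on the current system.
+    for m in IterateMountPoints():
+      if m.filesystem == 'tmpfs':
+        print('%s (%s)' % (m.destination, m.options))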
+  """
+  with open(proc_file) as f:
+    for line in f:
+      # Escape any \xxx to a char.
+      source, destination, filesystem, options, _, _ = [
+          re.sub(r'\\([0-7]{3})', lambda m: chr(int(m.group(1), 8)), x)
+          for x in line.split()
+      ]
+      mtab = MountInfo(source, destination, filesystem, options)
+      yield mtab
+
+
+def IsMounted(path):
+  """Determine if |path| is already mounted or not."""
+  path = os.path.realpath(path).rstrip('/')
+  mounts = [mtab.destination for mtab in IterateMountPoints()]
+  if path in mounts:
+    return True
+
+  return False
+
+
+def ResolveSymlinkInRoot(file_name, root):
+  """Resolve a symlink |file_name| relative to |root|.
+
+  This can be used to resolve absolute symlinks within an alternative root
+  path (i.e. chroot). For example:
+
+    ROOT-A/absolute_symlink --> /an/abs/path
+    ROOT-A/relative_symlink --> a/relative/path
+
+    absolute_symlink will be resolved to ROOT-A/an/abs/path
+    relative_symlink will be resolved to ROOT-A/a/relative/path
+
+  Args:
+    file_name (str): A path to the file.
+    root (str|None): A path to the root directory.
+
+  Returns:
+    |file_name| if |file_name| is not a symlink. Otherwise, the ultimate path
+    that |file_name| points to, with links resolved relative to |root|.
+  """
+  count = 0
+  while os.path.islink(file_name):
+    count += 1
+    if count > 128:
+      raise ValueError('Too many link levels for %s.' % file_name)
+    link = os.readlink(file_name)
+    if link.startswith('/'):
+      file_name = os.path.join(root, link[1:]) if root else link
+    else:
+      file_name = os.path.join(os.path.dirname(file_name), link)
+  return file_name
+
+
+def ResolveSymlink(file_name):
+  """Resolve a symlink |file_name| to an absolute path.
+
+  This is similar to ResolveSymlinkInRoot, but does not resolve absolute
+  symlinks to an alternative root, and normalizes the path before returning.
+
+  Args:
+    file_name (str): The symlink.
+
+  Returns:
+    str - |file_name| if |file_name| is not a symlink. Otherwise, the ultimate
+    path that |file_name| points to.
+  """
+  return os.path.realpath(ResolveSymlinkInRoot(file_name, None))
+
+
+def IsInsideVm():
+  """Return True if we are running inside a virtual machine.
+
+  The detection is based on the model of the hard drive.
+  """
+  for blk_model in glob.glob('/sys/block/*/device/model'):
+    if os.path.isfile(blk_model):
+      model = ReadFile(blk_model)
+      if model.startswith('VBOX') or model.startswith('VMware'):
+        return True
+
+  return False
diff --git a/utils/frozen_chromite/lib/parallel.py b/utils/frozen_chromite/lib/parallel.py
new file mode 100644
index 0000000..bc894f3
--- /dev/null
+++ b/utils/frozen_chromite/lib/parallel.py
@@ -0,0 +1,848 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module for running cbuildbot stages in the background."""
+
+from __future__ import print_function
+
+import collections
+import contextlib
+import ctypes
+import errno
+import functools
+import multiprocessing
+from multiprocessing.managers import SyncManager
+import os
+import signal
+import sys
+import time
+import traceback
+
+import six
+from six.moves import queue as Queue
+
+from autotest_lib.utils.frozen_chromite.lib import failures_lib
+from autotest_lib.utils.frozen_chromite.lib import results_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import signals
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
+
+
+_BUFSIZE = 1024
+
+
+class HackTimeoutSyncManager(SyncManager):
+  """Increase the process join timeout in SyncManager.
+
+  The timeout for the manager process to join in the core library is
+  too low. The process is often killed before shutting down properly,
+  resulting in temporary directories (pymp-xxx) not being cleaned
+  up. This class increases the default timeout.
+  """
+
+  @staticmethod
+  def _finalize_manager(process, *args, **kwargs):
+    """Shutdown the manager process."""
+
+    def _join(functor, *args, **kwargs):
+      timeout = kwargs.get('timeout')
+      if not timeout is None and timeout < 1:
+        kwargs['timeout'] = 1
+
+      functor(*args, **kwargs)
+
+    process.join = functools.partial(_join, process.join)
+    SyncManager._finalize_manager(process, *args, **kwargs)
+
+
+def IgnoreSigintAndSigterm():
+  """Ignores any future SIGINTs and SIGTERMs."""
+  signal.signal(signal.SIGINT, signal.SIG_IGN)
+  signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+
+def Manager():
+  """Create a background process for managing interprocess communication.
+
+  This manager wraps multiprocessing.Manager() and ensures that any sockets
+  created during initialization are created under the /tmp tree rather than in a
+  custom temp directory. This is needed because TMPDIR might be really long, and
+  named sockets are limited to 108 characters.
+
+  Examples:
+    with Manager() as manager:
+      queue = manager.Queue()
+      ...
+
+  Returns:
+    The return value of multiprocessing.Manager()
+  """
+  # Use a short directory in /tmp. Do not use /tmp directly to keep these
+  # temporary files together and because certain environments do not like too
+  # many top-level paths in /tmp (see crbug.com/945523).
+  # Make it mode 1777 to mirror /tmp, so that we don't have failures when root
+  # calls parallel first, and some other user calls it later.
+  tmp_dir = '/tmp/chromite.parallel.%d' % os.geteuid()
+  osutils.SafeMakedirs(tmp_dir, mode=0o1777)
+  old_tempdir_value, old_tempdir_env = osutils.SetGlobalTempDir(tmp_dir)
+  try:
+    m = HackTimeoutSyncManager()
+    # SyncManager doesn't handle KeyboardInterrupt exceptions well; pipes get
+    # broken and E_NOENT or E_PIPE errors are thrown from various places. We
+    # can just ignore SIGINT in the SyncManager and things will close properly
+    # when the enclosing with-statement exits.
+    m.start(IgnoreSigintAndSigterm)
+    return m
+  finally:
+    osutils.SetGlobalTempDir(old_tempdir_value, old_tempdir_env)
+
+
+class BackgroundFailure(failures_lib.CompoundFailure):
+  """Exception to show a step failed while running in a background process."""
+
+
+class ProcessExitTimeout(Exception):
+  """Raised if a process cannot exit within the timeout."""
+
+
+class ProcessUnexpectedExit(Exception):
+  """Raised if a process exits unexpectedly."""
+
+
+class ProcessSilentTimeout(Exception):
+  """Raised when there is no output for a prolonged period of time."""
+
+
+class UnexpectedException(Exception):
+  """Raised when exception occurs at an unexpected place."""
+
+
+class _BackgroundTask(multiprocessing.Process):
+  """Run a task in the background.
+
+  This task may be the 'Run' function from a buildbot stage or just a plain
+  function. It will be run in the background. Output from this task is saved
+  to a temporary file and is printed when the 'Wait' function is called.
+  """
+
+  # The time we give Python to startup and exit.
+  STARTUP_TIMEOUT = 60 * 5
+  EXIT_TIMEOUT = 60 * 10
+
+  # The time we allow processes to be silent. This is in place so that we
+  # eventually catch hanging processes, and print the remainder of our output.
+  # Do not increase this. Instead, adjust your program to print regular progress
+  # updates, so that cbuildbot (and buildbot) can know that it has not hung.
+  SILENT_TIMEOUT = 60 * 145
+
+  # The amount by which we reduce the SILENT_TIMEOUT every time we launch
+  # a subprocess. This helps ensure that children get a chance to enforce the
+  # SILENT_TIMEOUT prior to the parents enforcing it.
+  SILENT_TIMEOUT_STEP = 30
+  MINIMUM_SILENT_TIMEOUT = 60 * 135
+
+  # The time before terminating or killing a task.
+  SIGTERM_TIMEOUT = 30
+  SIGKILL_TIMEOUT = 60
+
+  # How long we allow debug commands to run (so we don't hang while trying to
+  # recover from a hang).
+  DEBUG_CMD_TIMEOUT = 60
+
+  # Interval we check for updates from print statements.
+  PRINT_INTERVAL = 1
+
+  def __init__(self, task, queue, semaphore=None, task_args=None,
+               task_kwargs=None):
+    """Create a new _BackgroundTask object.
+
+    If semaphore is supplied, it will be acquired for the duration of the
+    steps that are run in the background. This can be used to limit the
+    number of simultaneous parallel tasks.
+
+    Args:
+      task: The task (a functor) to run in the background.
+      queue: A queue to be used for managing communication between the parent
+        and child process. This queue must be valid for the length of the
+        life of the child process, until the parent has collected its status.
+      semaphore: The lock to hold while |task| runs.
+      task_args: A list of args to pass to the |task|.
+      task_kwargs: A dict of optional args to pass to the |task|.
+    """
+    multiprocessing.Process.__init__(self)
+    self._task = task
+    self._queue = queue
+    self._semaphore = semaphore
+    self._started = multiprocessing.Event()
+    self._killing = multiprocessing.Event()
+    self._output = None
+    self._parent_pid = None
+    self._task_args = task_args if task_args else ()
+    self._task_kwargs = task_kwargs if task_kwargs else {}
+
+  def _WaitForStartup(self):
+    # TODO(davidjames): Use python-2.7 syntax to simplify this.
+    self._started.wait(self.STARTUP_TIMEOUT)
+    msg = 'Process failed to start in %d seconds' % self.STARTUP_TIMEOUT
+    assert self._started.is_set(), msg
+
+  @classmethod
+  def _DebugRunCommand(cls, cmd, **kwargs):
+    """Swallow any exception run raises.
+
+    Since these commands are for purely informational purposes, we don't want
+    random issues causing the bot to die.
+
+    Returns:
+      Stdout on success
+    """
+    log_level = kwargs['debug_level']
+    try:
+      with timeout_util.Timeout(cls.DEBUG_CMD_TIMEOUT):
+        return cros_build_lib.run(cmd, **kwargs).output
+    except (cros_build_lib.RunCommandError, timeout_util.TimeoutError) as e:
+      logging.log(log_level, 'Running %s failed: %s', cmd[0], str(e))
+      return ''
+
+  # Debug commands to run in gdb.  A class member so tests can stub it out.
+  GDB_COMMANDS = (
+      'info proc all',
+      'info threads',
+      'thread apply all py-list',
+      'thread apply all py-bt',
+      'thread apply all bt',
+      'detach',
+  )
+
+  @classmethod
+  def _DumpDebugPid(cls, log_level, pid):
+    """Dump debug info about the hanging |pid|."""
+    pid = str(pid)
+    commands = (
+        ('pstree', '-Apals', pid),
+        ('lsof', '-p', pid),
+    )
+    for cmd in commands:
+      cls._DebugRunCommand(cmd, debug_level=log_level, check=False,
+                           log_output=True, encoding='utf-8')
+
+    stdin = '\n'.join(['echo \\n>>> %s\\n\n%s' % (x, x)
+                       for x in cls.GDB_COMMANDS])
+    cmd = ('gdb', '--nx', '-q', '-p', pid, '-ex', 'set prompt',)
+    cls._DebugRunCommand(cmd, debug_level=log_level, check=False,
+                         log_output=True, input=stdin, encoding='utf-8')
+
+  def Kill(self, sig, log_level, first=False):
+    """Kill process with signal, ignoring if the process is dead.
+
+    Args:
+      sig: Signal to send.
+      log_level: The log level of log messages.
+      first: Whether this is the first signal we've sent.
+    """
+    self._killing.set()
+    self._WaitForStartup()
+    if logging.getLogger().isEnabledFor(log_level):
+      # Dump debug information about the hanging process.
+      logging.log(log_level, 'Killing %r (sig=%r %s)', self.pid, sig,
+                  signals.StrSignal(sig))
+
+      if first:
+        ppid = str(self.pid)
+        output = self._DebugRunCommand(
+            ('pgrep', '-P', ppid), debug_level=log_level, print_cmd=False,
+            check=False, capture_output=True)
+        for pid in [ppid] + output.splitlines():
+          self._DumpDebugPid(log_level, pid)
+
+    try:
+      os.kill(self.pid, sig)
+    except OSError as ex:
+      if ex.errno != errno.ESRCH:
+        raise
+
+  def Cleanup(self, silent=False):
+    """Wait for a process to exit."""
+    if os.getpid() != self._parent_pid or self._output is None:
+      return
+    try:
+      # Print output from subprocess.
+      if not silent and logging.getLogger().isEnabledFor(logging.DEBUG):
+        with open(self._output.name, 'r') as f:
+          for line in f:
+            logging.debug(line.rstrip('\n'))
+    finally:
+      # Clean up our temporary file.
+      osutils.SafeUnlink(self._output.name)
+      self._output.close()
+      self._output = None
+
+  def Wait(self):
+    """Wait for the task to complete.
+
+    Output from the task is printed as it runs.
+
+    If an exception occurs, return a string containing the traceback.
+    """
+    try:
+      # Flush stdout and stderr to be sure no output is interleaved.
+      sys.stdout.flush()
+      sys.stderr.flush()
+
+      # File position pointers are shared across processes, so we must open
+      # our own file descriptor to ensure output is not lost.
+      self._WaitForStartup()
+      silent_death_time = time.time() + self.SILENT_TIMEOUT
+      results = []
+      with open(self._output.name, 'r') as output:
+        pos = 0
+        running, exited_cleanly, task_errors, run_errors = (True, False, [], [])
+        while running:
+          # Check whether the process is still alive.
+          running = self.is_alive()
+
+          try:
+            errors, results = \
+                self._queue.get(True, self.PRINT_INTERVAL)
+            if errors:
+              task_errors.extend(errors)
+
+            running = False
+            exited_cleanly = True
+          except Queue.Empty:
+            pass
+
+          if not running:
+            # Wait for the process to actually exit. If the child doesn't exit
+            # in a timely fashion, kill it.
+            self.join(self.EXIT_TIMEOUT)
+            if self.exitcode is None:
+              msg = '%r hung for %r seconds' % (self, self.EXIT_TIMEOUT)
+              run_errors.extend(
+                  failures_lib.CreateExceptInfo(ProcessExitTimeout(msg), ''))
+              self._KillChildren([self])
+            elif not exited_cleanly:
+              msg = ('%r exited unexpectedly with code %s'
+                     % (self, self.exitcode))
+              run_errors.extend(
+                  failures_lib.CreateExceptInfo(ProcessUnexpectedExit(msg), ''))
+
+          # Read output from process.
+          output.seek(pos)
+          buf = output.read(_BUFSIZE)
+
+          if buf:
+            silent_death_time = time.time() + self.SILENT_TIMEOUT
+          elif running and time.time() > silent_death_time:
+            msg = ('No output from %r for %r seconds' %
+                   (self, self.SILENT_TIMEOUT))
+            run_errors.extend(
+                failures_lib.CreateExceptInfo(ProcessSilentTimeout(msg), ''))
+            self._KillChildren([self])
+
+            # Read remaining output from the process.
+            output.seek(pos)
+            buf = output.read(_BUFSIZE)
+            running = False
+
+          # Print output so far.
+          while buf:
+            sys.stdout.write(buf)
+            pos += len(buf)
+            if len(buf) < _BUFSIZE:
+              break
+            buf = output.read(_BUFSIZE)
+
+          # Print error messages if anything exceptional occurred.
+          if run_errors:
+            logging.PrintBuildbotStepFailure()
+            traceback.print_stack()
+            logging.warning('\n'.join(x.str for x in run_errors if x))
+            logging.info('\n'.join(x.str for x in task_errors if x))
+
+          sys.stdout.flush()
+          sys.stderr.flush()
+
+      # Propagate any results.
+      for result in results:
+        results_lib.Results.Record(*result)
+
+    finally:
+      self.Cleanup(silent=True)
+
+    # If an error occurred, return it.
+    return run_errors + task_errors
+
+  def start(self):
+    """Invoke multiprocessing.Process.start after flushing output/err."""
+    if self.SILENT_TIMEOUT < self.MINIMUM_SILENT_TIMEOUT:
+      raise AssertionError('Maximum recursion depth exceeded in %r' % self)
+
+    sys.stdout.flush()
+    sys.stderr.flush()
+    tmp_dir = '/tmp/chromite.parallel.%d' % os.geteuid()
+    osutils.SafeMakedirs(tmp_dir, mode=0o1777)
+    self._output = cros_build_lib.UnbufferedNamedTemporaryFile(
+        delete=False, dir=tmp_dir, prefix='chromite-parallel-')
+    self._parent_pid = os.getpid()
+    return multiprocessing.Process.start(self)
+
+  def run(self):
+    """Run the list of steps."""
+    if self._semaphore is not None:
+      self._semaphore.acquire()
+
+    errors = failures_lib.CreateExceptInfo(
+        UnexpectedException('Unexpected exception in %r' % self), '')
+    pid = os.getpid()
+    try:
+      errors = self._Run()
+    finally:
+      if not self._killing.is_set() and os.getpid() == pid:
+        results = results_lib.Results.Get()
+        self._queue.put((errors, results))
+        if self._semaphore is not None:
+          self._semaphore.release()
+
+  def _Run(self):
+    """Internal method for running the list of steps."""
+    # Register a handler for a signal that is rarely used.
+    def trigger_bt(_sig_num, frame):
+      logging.error('pre-kill notification (SIGXCPU); traceback:\n%s',
+                    ''.join(traceback.format_stack(frame)))
+    signal.signal(signal.SIGXCPU, trigger_bt)
+
+    sys.stdout.flush()
+    sys.stderr.flush()
+    errors = []
+    # Send all output to a named temporary file.
+    with open(self._output.name, 'wb', 0) as output:
+      # Back up sys.std{err,out}. These aren't used, but we keep a copy so
+      # that they aren't garbage collected. We intentionally don't restore
+      # the old stdout and stderr at the end, because we want shutdown errors
+      # to also be sent to the same log file.
+      _orig_stdout, _orig_stderr = sys.stdout, sys.stderr
+
+      # Replace std{out,err} with unbuffered file objects.
+      os.dup2(output.fileno(), sys.__stdout__.fileno())
+      os.dup2(output.fileno(), sys.__stderr__.fileno())
+      # The API of these funcs changed between versions.
+      if sys.version_info.major < 3:
+        sys.stdout = os.fdopen(sys.__stdout__.fileno(), 'w', 0)
+        sys.stderr = os.fdopen(sys.__stderr__.fileno(), 'w', 0)
+      else:
+        sys.stdout = os.fdopen(sys.__stdout__.fileno(), 'w', closefd=False)
+        sys.stderr = os.fdopen(sys.__stderr__.fileno(), 'w', closefd=False)
+
+      try:
+        self._started.set()
+        results_lib.Results.Clear()
+
+        # Reduce the silent timeout by the prescribed amount.
+        cls = self.__class__
+        cls.SILENT_TIMEOUT -= cls.SILENT_TIMEOUT_STEP
+
+        # Actually launch the task.
+        self._task(*self._task_args, **self._task_kwargs)
+      except failures_lib.StepFailure as ex:
+        errors.extend(failures_lib.CreateExceptInfo(
+            ex, traceback.format_exc()))
+      except BaseException as ex:
+        errors.extend(failures_lib.CreateExceptInfo(
+            ex, traceback.format_exc()))
+        if self._killing.is_set():
+          traceback.print_exc()
+      finally:
+        sys.stdout.flush()
+        sys.stderr.flush()
+
+    return errors
+
+  @classmethod
+  def _KillChildren(cls, bg_tasks, log_level=logging.WARNING):
+    """Kill a deque of background tasks.
+
+    This is needed to prevent hangs in the case where child processes refuse
+    to exit.
+
+    Args:
+      bg_tasks: A list filled with _BackgroundTask objects.
+      log_level: The log level of log messages.
+    """
+    logging.log(log_level, 'Killing tasks: %r', bg_tasks)
+    siglist = (
+        (signal.SIGXCPU, cls.SIGTERM_TIMEOUT),
+        (signal.SIGTERM, cls.SIGKILL_TIMEOUT),
+        (signal.SIGKILL, None),
+    )
+    first = True
+    for sig, timeout in siglist:
+      # Send signal to all tasks.
+      for task in bg_tasks:
+        task.Kill(sig, log_level, first)
+      first = False
+
+      # Wait for all tasks to exit, if requested.
+      if timeout is None:
+        for task in bg_tasks:
+          task.join()
+          task.Cleanup()
+        break
+
+      # Wait until timeout expires.
+      end_time = time.time() + timeout
+      while bg_tasks:
+        time_left = end_time - time.time()
+        if time_left <= 0:
+          break
+        task = bg_tasks[-1]
+        task.join(time_left)
+        if task.exitcode is not None:
+          task.Cleanup()
+          bg_tasks.pop()
+
+  @classmethod
+  @contextlib.contextmanager
+  def ParallelTasks(cls, steps, max_parallel=None, halt_on_error=False):
+    """Run a list of functions in parallel.
+
+    This function launches the provided functions in the background, yields,
+    and then waits for the functions to exit.
+
+    The output from the functions is saved to a temporary file and printed as if
+    they were run in sequence.
+
+    If exceptions occur in the steps, we join together the tracebacks and print
+    them after all parallel tasks have finished running. Further, a
+    BackgroundFailure is raised with full stack traces of all exceptions.
+
+    Args:
+      steps: A list of functions to run.
+      max_parallel: The maximum number of simultaneous tasks to run in parallel.
+        By default, run all tasks in parallel.
+      halt_on_error: After the first exception occurs, halt any running steps,
+        and squelch any further output, including any exceptions that might
+        occur.
+    """
+
+    semaphore = None
+    if max_parallel is not None:
+      semaphore = multiprocessing.Semaphore(max_parallel)
+
+    # First, start all the steps.
+    with Manager() as manager:
+      bg_tasks = collections.deque()
+      for step in steps:
+        task = cls(step, queue=manager.Queue(), semaphore=semaphore)
+        task.start()
+        bg_tasks.append(task)
+
+      foreground_except = None
+      try:
+        yield
+      except BaseException:
+        foreground_except = sys.exc_info()
+      finally:
+        errors = []
+        skip_bg_wait = halt_on_error and foreground_except is not None
+        # Wait for each step to complete.
+        while not skip_bg_wait and bg_tasks:
+          task = bg_tasks.popleft()
+          task_errors = task.Wait()
+          if task_errors:
+            errors.extend(task_errors)
+            if halt_on_error:
+              break
+
+        # If there are still tasks left, kill them.
+        if bg_tasks:
+          cls._KillChildren(bg_tasks, log_level=logging.DEBUG)
+
+        # Propagate any exceptions; foreground exceptions take precedence.
+        if foreground_except is not None:
+          # contextlib ignores caught exceptions unless explicitly re-raised.
+          six.reraise(foreground_except[0], foreground_except[1],
+                      foreground_except[2])
+        if errors:
+          raise BackgroundFailure(exc_infos=errors)
+
+  @staticmethod
+  def TaskRunner(queue, task, onexit=None, task_args=None, task_kwargs=None):
+    """Run task(*input) for each input in the queue.
+
+    Returns when it encounters an _AllTasksComplete object on the queue.
+    If exceptions occur, save them off and re-raise them as a
+    BackgroundFailure once we've finished processing the items in the queue.
+
+    Args:
+      queue: A queue of tasks to run. Add tasks to this queue, and they will
+        be run.
+      task: Function to run on each queued input.
+      onexit: Function to run after all inputs are processed.
+      task_args: A list of args to pass to the |task|.
+      task_kwargs: A dict of optional args to pass to the |task|.
+    """
+    if task_args is None:
+      task_args = []
+    elif not isinstance(task_args, list):
+      task_args = list(task_args)
+    if task_kwargs is None:
+      task_kwargs = {}
+
+    errors = []
+    while True:
+      # Wait for a new item to show up on the queue. This is a blocking wait,
+      # so if there's nothing to do, we just sit here.
+      x = queue.get()
+      if isinstance(x, _AllTasksComplete):
+        # All tasks are complete, so we should exit.
+        break
+      elif not isinstance(x, list):
+        x = task_args + list(x)
+      else:
+        x = task_args + x
+
+      # If no tasks failed yet, process the remaining tasks.
+      if not errors:
+        try:
+          task(*x, **task_kwargs)
+        except BaseException as ex:
+          errors.extend(
+              failures_lib.CreateExceptInfo(ex, traceback.format_exc()))
+
+    # Run exit handlers.
+    if onexit:
+      onexit()
+
+    # Propagate any exceptions.
+    if errors:
+      raise BackgroundFailure(exc_infos=errors)
+
+
+def RunParallelSteps(steps, max_parallel=None, halt_on_error=False,
+                     return_values=False):
+  """Run a list of functions in parallel.
+
+  This function blocks until all steps are completed.
+
+  The output from the functions is saved to a temporary file and printed as if
+  they were run in sequence.
+
+  If exceptions occur in the steps, we join together the tracebacks and print
+  them after all parallel tasks have finished running. Further, a
+  BackgroundFailure is raised with full stack traces of all exceptions.
+
+  Examples:
+    # This snippet will execute in parallel:
+    #   somefunc()
+    #   anotherfunc()
+    #   funcfunc()
+    steps = [somefunc, anotherfunc, funcfunc]
+    RunParallelSteps(steps)
+    # Blocks until all calls have completed.
+
+  Args:
+    steps: A list of functions to run.
+    max_parallel: The maximum number of simultaneous tasks to run in parallel.
+      By default, run all tasks in parallel.
+    halt_on_error: After the first exception occurs, halt any running steps,
+      and squelch any further output, including any exceptions that might occur.
+    return_values: If set to True, RunParallelSteps returns a list containing
+      the return values of the steps.  Defaults to False.
+
+  Returns:
+    If |return_values| is True, the function will return a list containing the
+    return values of the steps.
+  """
+  def ReturnWrapper(queue, fn):
+    """Put the return value of |fn| into |queue|."""
+    queue.put(fn())
+
+  full_steps = []
+  queues = []
+  with cros_build_lib.ContextManagerStack() as stack:
+    if return_values:
+      # We use a managed queue here, because the child process will wait for the
+      # queue(pipe) to be flushed (i.e., when items are read from the queue)
+      # before exiting, and with a regular queue this may result in hangs for
+      # large return values.  But with a managed queue, the manager process will
+      # read the items and hold on to them until the managed queue goes out of
+      # scope and is cleaned up.
+      manager = stack.Add(Manager)
+      for step in steps:
+        queue = manager.Queue()
+        queues.append(queue)
+        full_steps.append(functools.partial(ReturnWrapper, queue, step))
+    else:
+      full_steps = steps
+
+    with _BackgroundTask.ParallelTasks(full_steps, max_parallel=max_parallel,
+                                       halt_on_error=halt_on_error):
+      pass
+
+    if return_values:
+      return [queue.get_nowait() for queue in queues]
+
+
+class _AllTasksComplete(object):
+  """Sentinel object to indicate that all tasks are complete."""
+
+
+@contextlib.contextmanager
+def BackgroundTaskRunner(task, *args, **kwargs):
+  """Run the specified task on each queued input in a pool of processes.
+
+  This context manager starts a set of workers in the background, who each
+  wait for input on the specified queue. For each input on the queue, these
+  workers run task(*args + *input, **kwargs). Note that certain kwargs will
+  not pass through to the task (see Args below for the list).
+
+  The output from these tasks is saved to a temporary file. When control
+  returns to the context manager, the background output is printed in order,
+  as if the tasks were run in sequence.
+
+  If exceptions occur in the steps, we join together the tracebacks and print
+  them after all parallel tasks have finished running. Further, a
+  BackgroundFailure is raised with full stack traces of all exceptions.
+
+  Examples:
+    # This will run somefunc(1, 'small', 'cow', foo='bar') in the background
+    # as soon as data is added to the queue (i.e. queue.put() is called).
+
+    def somefunc(arg1, arg2, arg3, foo=None):
+      ...
+
+    with BackgroundTaskRunner(somefunc, 1, foo='bar') as queue:
+      ... do random stuff ...
+      queue.put(['small', 'cow'])
+      ... do more random stuff while somefunc() runs ...
+    # Exiting the with statement will block until all calls have completed.
+
+  Args:
+    task: Function to run on each queued input.
+    queue: A queue of tasks to run. Add tasks to this queue, and they will
+      be run in the background.  If None, one will be created on the fly.
+    processes: Number of processes to launch.
+    onexit: Function to run in each background process after all inputs are
+      processed.
+    halt_on_error: After the first exception occurs, halt any running steps, and
+      squelch any further output, including any exceptions that might occur.
+      Halts on exceptions in any of the background processes, or in the
+      foreground process using the BackgroundTaskRunner.
+  """
+
+  queue = kwargs.pop('queue', None)
+  processes = kwargs.pop('processes', None)
+  onexit = kwargs.pop('onexit', None)
+  halt_on_error = kwargs.pop('halt_on_error', False)
+
+  with cros_build_lib.ContextManagerStack() as stack:
+    if queue is None:
+      manager = stack.Add(Manager)
+      queue = manager.Queue()
+
+    if not processes:
+      processes = multiprocessing.cpu_count()
+
+    child = functools.partial(_BackgroundTask.TaskRunner, queue, task,
+                              onexit=onexit, task_args=args,
+                              task_kwargs=kwargs)
+    steps = [child] * processes
+    with _BackgroundTask.ParallelTasks(steps, halt_on_error=halt_on_error):
+      try:
+        yield queue
+      finally:
+        for _ in range(processes):
+          queue.put(_AllTasksComplete())
+
+
+def RunTasksInProcessPool(task, inputs, processes=None, onexit=None):
+  """Run the specified function with each supplied input in a pool of processes.
+
+  This function runs task(*x) for x in inputs in a pool of processes. This
+  function blocks until all tasks are completed.
+
+  The output from these tasks is saved to a temporary file. When control
+  returns to the context manager, the background output is printed in order,
+  as if the tasks were run in sequence.
+
+  If exceptions occur in the steps, we join together the tracebacks and print
+  them after all parallel tasks have finished running. Further, a
+  BackgroundFailure is raised with full stack traces of all exceptions.
+
+  Examples:
+    # This snippet will execute in parallel:
+    #   somefunc('hi', 'fat', 'code')
+    #   somefunc('foo', 'bar', 'cow')
+
+    def somefunc(arg1, arg2, arg3):
+      ...
+    ...
+    inputs = [
+      ['hi', 'fat', 'code'],
+      ['foo', 'bar', 'cow'],
+    ]
+    RunTasksInProcessPool(somefunc, inputs)
+    # Blocks until all calls have completed.
+
+  Args:
+    task: Function to run on each input.
+    inputs: List of inputs.
+    processes: Number of processes, at most, to launch.
+    onexit: Function to run in each background process after all inputs are
+      processed.
+
+  Returns:
+    Returns a list containing the return values of the task for each input.
+  """
+  if not processes:
+    # - Use >=16 processes by default, in case it's a network-bound operation.
+    # - Try to use all of the CPUs, in case it's a CPU-bound operation.
+    processes = min(max(16, multiprocessing.cpu_count()), len(inputs))
+
+  with Manager() as manager:
+    # Set up output queue.
+    out_queue = manager.Queue()
+    fn = lambda idx, task_args: out_queue.put((idx, task(*task_args)))
+
+    # Micro-optimization: Setup the queue so that BackgroundTaskRunner
+    # doesn't have to set up another Manager process.
+    queue = manager.Queue()
+
+    with BackgroundTaskRunner(fn, queue=queue, processes=processes,
+                              onexit=onexit) as queue:
+      for idx, input_args in enumerate(inputs):
+        queue.put((idx, input_args))
+
+    return [x[1] for x in sorted(out_queue.get() for _ in range(len(inputs)))]
+
+
+PR_SET_PDEATHSIG = 1
+
+
+def ExitWithParent(sig=signal.SIGHUP):
+  """Sets this process to receive |sig| when the parent dies.
+
+  Note: this uses libc, so it only works on linux.
+
+  Args:
+    sig: Signal to receive. Defaults to SIGHUP.
+
+  Returns:
+    Whether we were successful in setting the deathsignal flag
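+
+  Examples:
+    # Illustrative sketch: have a forked child die when its parent exits.
+    pid = os.fork()
+    if pid == 0:
+      ExitWithParent(signal.SIGTERM)
+      ...  # child work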
+  """
+  libc_name = ctypes.util.find_library('c')
+  if not libc_name:
+    return False
+  try:
+    libc = ctypes.CDLL(libc_name)
+    libc.prctl(PR_SET_PDEATHSIG, sig)
+    return True
+  # We might not be able to load the library (OSError), or prctl might be
+  # missing (AttributeError)
+  except (OSError, AttributeError):
+    return False
diff --git a/utils/frozen_chromite/lib/path_util.py b/utils/frozen_chromite/lib/path_util.py
new file mode 100644
index 0000000..ef2a178
--- /dev/null
+++ b/utils/frozen_chromite/lib/path_util.py
@@ -0,0 +1,365 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Handle path inference and translation."""
+
+from __future__ import print_function
+
+import collections
+import os
+import tempfile
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import git
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.utils import memoize
+
+
+GENERAL_CACHE_DIR = '.cache'
+CHROME_CACHE_DIR = 'cros_cache'
+
+CHECKOUT_TYPE_UNKNOWN = 'unknown'
+CHECKOUT_TYPE_GCLIENT = 'gclient'
+CHECKOUT_TYPE_REPO = 'repo'
+
+CheckoutInfo = collections.namedtuple(
+    'CheckoutInfo', ['type', 'root', 'chrome_src_dir'])
+
+
+class ChrootPathResolver(object):
+  """Perform path resolution to/from the chroot.
+
+  Attributes:
+    source_path: Value to override default source root inference.
+    source_from_path_repo: Whether to infer the source root from the converted
+      path's repo parent during inbound translation; overrides |source_path|.
+  """
+
+  # TODO(garnold) We currently infer the source root based on the path's own
+  # encapsulating repository. This is a heuristic catering to paths that are
+  # being translated for use in a chroot that's not associated with the currently
+  # executing code (for example, cbuildbot run on a build root or a foreign
+  # tree checkout). This approach might result in arbitrary repo-contained
+  # paths being translated to invalid chroot paths where they actually should
+  # not, and other valid source paths failing to translate because they are not
+  # repo-contained. Eventually we'll want to make this behavior explicit, by
+  # either passing a source_root value, or requesting to infer it from the path
+  # (source_from_path_repo=True), but otherwise defaulting to the executing
+  # code's source root in the normal case. When that happens, we'll be
+  # switching source_from_path_repo to False by default. See chromium:485746.
+
+  def __init__(self, source_path=None, source_from_path_repo=True):
+    self._inside_chroot = cros_build_lib.IsInsideChroot()
+    self._source_path = (constants.SOURCE_ROOT if source_path is None
+                         else source_path)
+    self._source_from_path_repo = source_from_path_repo
+
+    # The following are only needed if outside the chroot.
+    if self._inside_chroot:
+      self._chroot_path = None
+      self._chroot_link = None
+      self._chroot_to_host_roots = None
+    else:
+      self._chroot_path = self._GetSourcePathChroot(self._source_path)
+      # The chroot link allows us to resolve paths when the chroot is symlinked
+      # to the default location. This is generally not used, but it is useful
+      # for CI for optimization purposes. We will trust them not to do something
+      # dumb, like symlink to /, but this doesn't enable that kind of behavior
+      # anyway, just allows resolving paths correctly from outside the chroot.
+      self._chroot_link = self._ReadChrootLink(self._chroot_path)
+
+      # Initialize mapping of known root bind mounts.
+      self._chroot_to_host_roots = (
+          (constants.CHROOT_SOURCE_ROOT, self._source_path),
+          (constants.CHROOT_CACHE_ROOT, self._GetCachePath),
+      )
+
+  @classmethod
+  @memoize.MemoizedSingleCall
+  def _GetCachePath(cls):
+    """Returns the cache directory."""
+    return os.path.realpath(GetCacheDir())
+
+  def _GetSourcePathChroot(self, source_path):
+    """Returns path to the chroot directory of a given source root."""
+    if source_path is None:
+      return None
+    return os.path.join(source_path, constants.DEFAULT_CHROOT_DIR)
+
+  def _ReadChrootLink(self, path):
+    """Convert a chroot symlink to its absolute path.
+
+    This contains default/edge-case assumptions for chroot paths. It is not
+    recommended for non-chroot paths.
+
+    Args:
+      path (str|None): The path to resolve.
+
+    Returns:
+      str|None: The resolved path if the provided path is a symlink, None
+        otherwise.
+    """
+    # Mainly for the "if self._source_from_path_repo:" branch in _GetChrootPath.
+    # _GetSourcePathChroot can return None, so double check it here.
+    if not path:
+      return None
+
+    abs_path = os.path.abspath(path)
+    link = osutils.ResolveSymlink(abs_path)
+
+    # ResolveSymlink returns the passed path when the path isn't a symlink.
+    # Returning None in that case lets callers skip redundant work when the
+    # chroot is not actually a symlink.
+    if link == abs_path:
+      return None
+
+    return link
+
+  def _TranslatePath(self, path, src_root, dst_root_input):
+    """If |path| starts with |src_root|, replace it using |dst_root_input|.
+
+    Args:
+      path: An absolute path we want to convert to a destination equivalent.
+      src_root: The root that path needs to be contained in.
+      dst_root_input: The root we want to relocate the relative path into, or a
+        function returning this value.
+
+    Returns:
+      A translated path, or None if |src_root| is not a prefix of |path|.
+
+    Raises:
+      ValueError: If |src_root| is a prefix but |dst_root_input| yields None,
+        which means we don't have sufficient information to do the translation.
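+
+    Examples:
+      An illustrative sketch (roots below are hypothetical):
+      self._TranslatePath('/src/foo/bar', '/src/foo', '/dst')   # '/dst/bar'
+      self._TranslatePath('/elsewhere/bar', '/src/foo', '/dst') # None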
+    """
+    if not path.startswith(os.path.join(src_root, '')) and path != src_root:
+      return None
+    dst_root = dst_root_input() if callable(dst_root_input) else dst_root_input
+    if dst_root is None:
+      raise ValueError('No target root to translate path to')
+    return os.path.join(dst_root, path[len(src_root):].lstrip(os.path.sep))
+
+  def _GetChrootPath(self, path):
+    """Translates a fully-expanded host |path| into a chroot equivalent.
+
+    This checks path prefixes in order from the most to least "contained": the
+    chroot itself, then the cache directory, and finally the source tree. The
+    idea is to return the shortest possible chroot equivalent.
+
+    Args:
+      path: A host path to translate.
+
+    Returns:
+      An equivalent chroot path.
+
+    Raises:
+      ValueError: If |path| is not reachable from the chroot.
+    """
+    new_path = None
+
+    # Preliminary: compute the actual source and chroot paths to use. These are
+    # generally the precomputed values, unless we're inferring the source root
+    # from the path itself.
+    source_path = self._source_path
+    chroot_path = self._chroot_path
+    chroot_link = self._chroot_link
+
+    if self._source_from_path_repo:
+      path_repo_dir = git.FindRepoDir(path)
+      if path_repo_dir is not None:
+        source_path = os.path.abspath(os.path.join(path_repo_dir, '..'))
+      chroot_path = self._GetSourcePathChroot(source_path)
+      chroot_link = self._ReadChrootLink(chroot_path)
+
+    # First, check if the path happens to be in the chroot already.
+    if chroot_path is not None:
+      new_path = self._TranslatePath(path, chroot_path, '/')
+      # Or in the symlinked dir.
+      if new_path is None and chroot_link is not None:
+        new_path = self._TranslatePath(path, chroot_link, '/')
+
+    # Second, check the cache directory.
+    if new_path is None:
+      new_path = self._TranslatePath(path, self._GetCachePath(),
+                                     constants.CHROOT_CACHE_ROOT)
+
+    # Finally, check the current SDK checkout tree.
+    if new_path is None and source_path is not None:
+      new_path = self._TranslatePath(path, source_path,
+                                     constants.CHROOT_SOURCE_ROOT)
+
+    if new_path is None:
+      raise ValueError('Path is not reachable from the chroot')
+
+    return new_path
+
+  def _GetHostPath(self, path):
+    """Translates a fully-expanded chroot |path| into a host equivalent.
+
+    We first attempt translation of the known roots (source and cache). If any
+    is successful, we check whether the result happens to point back to the
+    chroot, in which case we trim the chroot path prefix and recurse. If no
+    translation succeeded, just prepend the chroot path.
+
+    Args:
+      path: A chroot path to translate.
+
+    Returns:
+      An equivalent host path.
+
+    Raises:
+      ValueError: If |path| could not be mapped to a proper host destination.
+    """
+    new_path = None
+
+    # Attempt resolution of known roots.
+    for src_root, dst_root in self._chroot_to_host_roots:
+      new_path = self._TranslatePath(path, src_root, dst_root)
+      if new_path is not None:
+        break
+
+    if new_path is None:
+      # If no known root was identified, just prepend the chroot path.
+      new_path = self._TranslatePath(path, '', self._chroot_path)
+    else:
+      # Check whether the resolved path happens to point back at the chroot, in
+      # which case trim the chroot path or link prefix and continue recursively.
+      path = self._TranslatePath(new_path, self._chroot_path, '/')
+      if path is None and self._chroot_link:
+        path = self._TranslatePath(new_path, self._chroot_link, '/')
+
+      if path is not None:
+        new_path = self._GetHostPath(path)
+
+    return new_path
+
+  def _ConvertPath(self, path, get_converted_path):
+    """Expands |path|; if outside the chroot, applies |get_converted_path|.
+
+    Args:
+      path: A path to be converted.
+      get_converted_path: A conversion function.
+
+    Returns:
+      An expanded and (if needed) converted path.
+
+    Raises:
+      ValueError: If path conversion failed.
+    """
+    # NOTE: We do not want to expand wrapper script symlinks because this
+    # prevents them from working. Therefore, if the path points to a file we
+    # only resolve its dirname but leave the basename intact. This means our
+    # path resolution might return unusable results for file symlinks that
+    # point outside the reachable space. These are edge cases in which the user
+    # is expected to resolve the realpath themselves in advance.
+    expanded_path = os.path.expanduser(path)
+    if os.path.isfile(expanded_path):
+      expanded_path = os.path.join(
+          os.path.realpath(os.path.dirname(expanded_path)),
+          os.path.basename(expanded_path))
+    else:
+      expanded_path = os.path.realpath(expanded_path)
+
+    if self._inside_chroot:
+      return expanded_path
+
+    try:
+      return get_converted_path(expanded_path)
+    except ValueError as e:
+      raise ValueError('%s: %s' % (e, path))
+
+  def ToChroot(self, path):
+    """Resolves current environment |path| for use in the chroot."""
+    return self._ConvertPath(path, self._GetChrootPath)
+
+  def FromChroot(self, path):
+    """Resolves chroot |path| for use in the current environment."""
+    return self._ConvertPath(path, self._GetHostPath)
+
+
+def DetermineCheckout(cwd=None):
+  """Gather information on the checkout we are in.
+
+  There are several checkout types, as defined by CHECKOUT_TYPE_XXX variables.
+  This function determines what checkout type |cwd| is in, for example, if |cwd|
+  belongs to a `repo` checkout.
+
+  Args:
+    cwd: The directory to inspect. Defaults to the current working directory.
+
+  Returns:
+    A CheckoutInfo object with these attributes:
+      type: The type of checkout.  Valid values are CHECKOUT_TYPE_*.
+      root: The root of the checkout.
+      chrome_src_dir: If the checkout is a Chrome checkout, the path to the
+        Chrome src/ directory.
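+
+  Examples:
+    An illustrative sketch (the path below is hypothetical):
+    checkout = DetermineCheckout('/home/user/chromiumos/src/platform')
+    if checkout.type == CHECKOUT_TYPE_REPO:
+      print(checkout.root)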
+  """
+  checkout_type = CHECKOUT_TYPE_UNKNOWN
+  root, path = None, None
+
+  cwd = cwd or os.getcwd()
+  for path in osutils.IteratePathParents(cwd):
+    gclient_file = os.path.join(path, '.gclient')
+    if os.path.exists(gclient_file):
+      checkout_type = CHECKOUT_TYPE_GCLIENT
+      break
+    repo_dir = os.path.join(path, '.repo')
+    if os.path.isdir(repo_dir):
+      checkout_type = CHECKOUT_TYPE_REPO
+      break
+
+  if checkout_type != CHECKOUT_TYPE_UNKNOWN:
+    root = path
+
+  # Determine the chrome src directory.
+  chrome_src_dir = None
+  if checkout_type == CHECKOUT_TYPE_GCLIENT:
+    chrome_src_dir = os.path.join(root, 'src')
+
+  return CheckoutInfo(checkout_type, root, chrome_src_dir)
+
+
+def FindCacheDir():
+  """Returns the cache directory location based on the checkout type."""
+  checkout = DetermineCheckout()
+  if checkout.type == CHECKOUT_TYPE_REPO:
+    return os.path.join(checkout.root, GENERAL_CACHE_DIR)
+  elif checkout.type == CHECKOUT_TYPE_GCLIENT:
+    return os.path.join(checkout.chrome_src_dir, 'build', CHROME_CACHE_DIR)
+  elif checkout.type == CHECKOUT_TYPE_UNKNOWN:
+    return os.path.join(tempfile.gettempdir(), 'chromeos-cache')
+  else:
+    raise AssertionError('Unexpected type %s' % checkout.type)
+
+
+def GetCacheDir():
+  """Returns the current cache dir."""
+  return os.environ.get(constants.SHARED_CACHE_ENVVAR, FindCacheDir())
+
+
+def ToChrootPath(path, source_path=None):
+  """Resolves current environment |path| for use in the chroot.
+
+  Args:
+    path: string path to translate into chroot namespace.
+    source_path: string path to root of source checkout with chroot in it.
+
+  Returns:
+    The same path converted to "inside chroot" namespace.
+
+  Raises:
+    ValueError: If the path references a location not available in the chroot.
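+
+  Examples:
+    An illustrative sketch (the path below is hypothetical):
+    ToChrootPath('/home/user/chromiumos/src/scripts')
+    # Typically maps to the corresponding path under the chroot source root.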
+  """
+  return ChrootPathResolver(source_path=source_path).ToChroot(path)
+
+
+def FromChrootPath(path, source_path=None):
+  """Resolves chroot |path| for use in the current environment.
+
+  Args:
+    path: string path to translate out of chroot namespace.
+    source_path: string path to root of source checkout with chroot in it.
+
+  Returns:
+    The same path converted to "outside chroot" namespace.
+  """
+  return ChrootPathResolver(source_path=source_path).FromChroot(path)
diff --git a/utils/frozen_chromite/lib/remote_access.py b/utils/frozen_chromite/lib/remote_access.py
new file mode 100644
index 0000000..e59b5a0
--- /dev/null
+++ b/utils/frozen_chromite/lib/remote_access.py
@@ -0,0 +1,1389 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Library containing functions to access a remote test device."""
+
+from __future__ import print_function
+
+import glob
+import os
+import re
+import shutil
+import socket
+import stat
+import subprocess
+import tempfile
+import time
+
+import six
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import osutils
+from autotest_lib.utils.frozen_chromite.lib import parallel
+from autotest_lib.utils.frozen_chromite.lib import timeout_util
+from autotest_lib.utils.frozen_chromite.scripts import cros_set_lsb_release
+from autotest_lib.utils.frozen_chromite.utils import memoize
+
+
+_path = os.path.dirname(os.path.realpath(__file__))
+TEST_PRIVATE_KEY = os.path.normpath(
+    os.path.join(_path, '../ssh_keys/testing_rsa'))
+del _path
+
+CHUNK_SIZE = 50 * 1024 * 1024
+DEGREE_OF_PARALLELISM = 8
+LOCALHOST = 'localhost'
+LOCALHOST_IP = '127.0.0.1'
+ROOT_ACCOUNT = 'root'
+
+# IP used for testing that is a valid IP address, but would fail quickly if
+# actually used for any real operation (e.g. pinging or making connections).
+# https://en.wikipedia.org/wiki/IPv4#Special-use_addresses
+TEST_IP = '0.1.2.3'
+
+REBOOT_MAX_WAIT = 180
+REBOOT_SSH_CONNECT_TIMEOUT = 2
+REBOOT_SSH_CONNECT_ATTEMPTS = 2
+CHECK_INTERVAL = 5
+DEFAULT_SSH_PORT = 22
+# Ssh returns status 255 when it encounters errors in its own code.  Otherwise
+# it returns the status of the command that it ran on the host, including
+# possibly 255.  Here we assume that 255 indicates only ssh errors.  This may
+# be a reasonable guess for our purposes.
+SSH_ERROR_CODE = 255
+
+# SSH default known_hosts filepath.
+KNOWN_HOSTS_PATH = os.path.expanduser('~/.ssh/known_hosts')
+
+# Dev/test packages are installed in these paths.
+DEV_BIN_PATHS = '/usr/local/bin:/usr/local/sbin'
+
+
+class RemoteAccessException(Exception):
+  """Base exception for this module."""
+
+
+class SSHConnectionError(RemoteAccessException):
+  """Raised when SSH connection has failed."""
+
+  def IsKnownHostsMismatch(self):
+    """Returns True if this error was caused by a known_hosts mismatch.
+
+    Will only check for a mismatch, this will return False if the host
+    didn't exist in known_hosts at all.
+    """
+    # Checking for string output is brittle, but there's no exit code that
+    # indicates why SSH failed so this might be the best we can do.
+    # RemoteAccess.RemoteSh() sets LC_MESSAGES=C so we only need to check for
+    # the English error message.
+    # Verified for OpenSSH_6.6.1p1.
+    return 'REMOTE HOST IDENTIFICATION HAS CHANGED' in str(self)
+
+
+class DeviceNotPingableError(RemoteAccessException):
+  """Raised when device is not pingable."""
+
+
+class DefaultDeviceError(RemoteAccessException):
+  """Raised when a default ChromiumOSDevice can't be found."""
+
+
+class CatFileError(RemoteAccessException):
+  """Raised when error occurs while trying to cat a remote file."""
+
+
+class RunningPidsError(RemoteAccessException):
+  """Raised when unable to get running pids on the device."""
+
+
+def NormalizePort(port, str_ok=True):
+  """Checks if |port| is a valid port number and returns the number.
+
+  Args:
+    port: The port to normalize.
+    str_ok: Accept |port| as a string. If set to False, only an integer is
+      accepted. Defaults to True.
+
+  Returns:
+    A port number (integer).
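+
+  Examples:
+    Illustrative values:
+    NormalizePort('8080')              # -> 8080
+    NormalizePort(8080, str_ok=False)  # -> 8080
+    NormalizePort('0')                 # raises ValueError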
+  """
+  err_msg = '%s is not a valid port number.' % port
+
+  if not str_ok and not isinstance(port, int):
+    raise ValueError(err_msg)
+
+  port = int(port)
+  if port <= 0 or port >= 65536:
+    raise ValueError(err_msg)
+
+  return port
+
+
+def GetUnusedPort(ip=LOCALHOST, family=socket.AF_INET,
+                  stype=socket.SOCK_STREAM):
+  """Returns a currently unused port.
+
+  Examples:
+    Note: Since this does not guarantee the port remains unused when you
+    attempt to bind it, your code should retry in a loop like so:
+    while True:
+      try:
+        port = remote_access.GetUnusedPort()
+        <attempt to bind the port>
+        break
+      except socket.error as e:
+        if e.errno == errno.EADDRINUSE:
+          continue
+        <fallback/raise>
+
+  Args:
+    ip: IP to use to bind the port.
+    family: Address family.
+    stype: Socket type.
+
+  Returns:
+    A port number (integer), or None if no port could be acquired.
+  """
+  s = None
+  try:
+    s = socket.socket(family, stype)
+    s.bind((ip, 0))
+    return s.getsockname()[1]
+  # TODO(vapier): Drop socket.error when we're Python 3-only.
+  # pylint: disable=overlapping-except
+  except (socket.error, OSError):
+    pass
+  finally:
+    if s is not None:
+      s.close()
+
+
+def RunCommandFuncWrapper(func, msg, *args, **kwargs):
+  """Wraps a function that invokes cros_build_lib.run.
+
+  If the command fails and check is set, raises cros_build_lib.RunCommandError
+  with |msg|; if check is not set, logs |msg| as a warning instead. Failures
+  are ignored entirely when |ignore_failures| is set.
+
+  Args:
+    func: The function to call.
+    msg: The message to display if the command failed.
+    ignore_failures: If True, ignore failures during the command.
+    *args: Arguments to pass to |func|.
+    **kwargs: Keyword arguments to pass to |func|.
+
+  Returns:
+    The result of |func|.
+
+  Raises:
+    cros_build_lib.RunCommandError if the command failed and check is set.
+  """
+  check = kwargs.pop('check', True)
+  ignore_failures = kwargs.pop('ignore_failures', False)
+  result = func(*args, check=False, **kwargs)
+
+  if not ignore_failures:
+    if result.returncode != 0 and check:
+      raise cros_build_lib.RunCommandError(msg, result)
+
+    if result.returncode != 0:
+      logging.warning(msg)
+
+  return result
+
+
+def CompileSSHConnectSettings(**kwargs):
+  """Creates a list of SSH connection options.
+
+  Any ssh_config option can be specified in |kwargs|, in addition,
+  several options are set to default values if not specified. Any
+  option can be set to None to prevent this function from assigning
+  a value so that the SSH default value will be used.
+
+  This function doesn't check to make sure the |kwargs| options are
+  valid, so a typo or invalid setting won't be caught until the
+  resulting arguments are passed into an SSH call.
+
+  Args:
+    kwargs: A dictionary of ssh_config settings.
+
+  Returns:
+    A list of arguments to pass to SSH.
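+
+  Examples:
+    An illustrative sketch; any ssh_config option may be passed by name:
+    CompileSSHConnectSettings(ConnectTimeout=60, Protocol=None)
+    # Produces '-oConnectTimeout=60' plus the remaining defaults; no
+    # '-oProtocol' option is emitted, so the SSH default is used.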
+  """
+  settings = {
+      'ConnectTimeout': 30,
+      'ConnectionAttempts': 4,
+      'NumberOfPasswordPrompts': 0,
+      'Protocol': 2,
+      'ServerAliveInterval': 10,
+      'ServerAliveCountMax': 3,
+      'StrictHostKeyChecking': 'no',
+      'UserKnownHostsFile': '/dev/null',
+  }
+  settings.update(kwargs)
+  return ['-o%s=%s' % (k, v) for k, v in settings.items() if v is not None]
+
+
+def RemoveKnownHost(host, known_hosts_path=KNOWN_HOSTS_PATH):
+  """Removes |host| from a known_hosts file.
+
+  `ssh-keygen -R` doesn't work on bind mounted files as they can only
+  be updated in place. Since we bind mount the default known_hosts file
+  when entering the chroot, this function provides an alternate way
+  to remove hosts from the file.
+
+  Args:
+    host: The host name to remove from the known_hosts file.
+    known_hosts_path: Path to the known_hosts file to change. Defaults
+                      to the standard SSH known_hosts file path.
+
+  Raises:
+    cros_build_lib.RunCommandError if ssh-keygen fails.
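+
+  Examples:
+    An illustrative sketch (the hostname is hypothetical):
+    RemoveKnownHost('chromeos-test-device.local')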
+  """
+  # `ssh-keygen -R` creates a backup file to retain the old 'known_hosts'
+  # content and never deletes it. Using TempDir here to make sure both the temp
+  # files created by us and `ssh-keygen -R` are deleted afterwards.
+  with osutils.TempDir(prefix='remote-access-') as tempdir:
+    temp_file = os.path.join(tempdir, 'temp_known_hosts')
+    try:
+      # Using shutil.copy2 to preserve the file ownership and permissions.
+      shutil.copy2(known_hosts_path, temp_file)
+    except IOError:
+      # If |known_hosts_path| doesn't exist neither does |host| so we're done.
+      return
+    cros_build_lib.run(['ssh-keygen', '-R', host, '-f', temp_file], quiet=True)
+    shutil.copy2(temp_file, known_hosts_path)
+
+
+class PortForwardSpec(object):
+  """Represent the information required to define an SSH tunnel."""
+
+  def __init__(self, local_port, remote_host='localhost', remote_port=None,
+               local_host='localhost'):
+    if remote_port is None:
+      remote_port = local_port
+    self.local_port = NormalizePort(local_port)
+    self.remote_port = NormalizePort(remote_port)
+    self.local_host = local_host
+    self.remote_host = remote_host
+
+  @property
+  def command_line_spec(self):
+    """Return the port forwarding spec for the `ssh` command."""
+    if not self.remote_host:
+      return '%d:%s:%d' % (self.remote_port, self.local_host, self.local_port)
+    return '%s:%d:%s:%d' % (self.remote_host, self.remote_port, self.local_host,
+                            self.local_port)
+
+
+class RemoteAccess(object):
+  """Provides access to a remote test machine."""
+
+  DEFAULT_USERNAME = ROOT_ACCOUNT
+
+  def __init__(self, remote_host, tempdir, port=None, username=None,
+               private_key=None, debug_level=logging.DEBUG, interactive=True):
+    """Construct the object.
+
+    Args:
+      remote_host: The ip or hostname of the remote test machine.  The test
+                   machine should be running a ChromeOS test image.
+      tempdir: A directory that RemoteAccess can use to store temporary files.
+               It's the responsibility of the caller to remove it.
+      port: The ssh port of the test machine to connect to.
+      username: The ssh login username (default: root).
+      private_key: The identity file to pass to `ssh -i` (default: testing_rsa).
+      debug_level: Logging level to use for all run invocations.
+      interactive: If set to False, pass /dev/null into stdin for the sh cmd.
+    """
+    self.tempdir = tempdir
+    self.remote_host = remote_host
+    self.port = port
+    self.username = username if username else self.DEFAULT_USERNAME
+    self.debug_level = debug_level
+    private_key_src = private_key if private_key else TEST_PRIVATE_KEY
+    self.private_key = os.path.join(
+        tempdir, os.path.basename(private_key_src))
+
+    self.interactive = interactive
+    shutil.copyfile(private_key_src, self.private_key)
+    os.chmod(self.private_key, stat.S_IRUSR)
+
+  @staticmethod
+  def _mockable_popen(*args, **kwargs):
+    """This wraps subprocess.Popen so it can be mocked in unit tests."""
+    return subprocess.Popen(*args, **kwargs)
+
+  @property
+  def target_ssh_url(self):
+    return '%s@%s' % (self.username, self.remote_host)
+
+  def _GetSSHCmd(self, connect_settings=None):
+    if connect_settings is None:
+      connect_settings = CompileSSHConnectSettings()
+
+    cmd = ['ssh']
+    if self.port:
+      cmd += ['-p', str(self.port)]
+    cmd += connect_settings
+    cmd += ['-oIdentitiesOnly=yes', '-i', self.private_key]
+    if not self.interactive:
+      cmd.append('-n')
+
+    return cmd
+
+  def GetSSHCommand(self, connect_settings=None):
+    """Returns the ssh command that can be used to connect to the device
+
+    Args:
+      connect_settings: dict of additional ssh options
+
+    Returns:
+      ['ssh', '...', 'user@host']
+    """
+    ssh_cmd = self._GetSSHCmd(connect_settings=connect_settings)
+    ssh_cmd.append(self.target_ssh_url)
+
+    return ssh_cmd
+
+  def RemoteSh(self, cmd, connect_settings=None, check=True,
+               remote_sudo=False, remote_user=None, ssh_error_ok=False,
+               **kwargs):
+    """Run a sh command on the remote device through ssh.
+
+    Args:
+      cmd: The command string or list to run. None or empty string/list will
+           start an interactive session.
+      connect_settings: The SSH connect settings to use.
+      check: Throw an exception when the command exits with a non-zero
+             returncode.  This does not cover the case where the ssh command
+             itself fails (return code 255).  See ssh_error_ok.
+      ssh_error_ok: Does not throw an exception when the ssh command itself
+                    fails (return code 255).
+      remote_sudo: If set, run the command in remote shell with sudo.
+      remote_user: If set, run the command as the specified user.
+      **kwargs: See cros_build_lib.run documentation.
+
+    Returns:
+      A CommandResult object.  The returncode is the returncode of the command,
+      or 255 if ssh encountered an error (could not connect, connection
+      interrupted, etc.)
+
+    Raises:
+      RunCommandError when error is not ignored through the check flag.
+      SSHConnectionError when ssh command error is not ignored through
+      the ssh_error_ok flag.
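+
+    Examples:
+      An illustrative sketch:
+      result = self.RemoteSh(['uname', '-r'])
+      kernel_version = result.output.strip()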
+    """
+    kwargs.setdefault('capture_output', True)
+    kwargs.setdefault('encoding', 'utf-8')
+    kwargs.setdefault('debug_level', self.debug_level)
+    # Force English SSH messages. SSHConnectionError.IsKnownHostsMismatch()
+    # requires English errors to detect a known_hosts key mismatch error.
+    kwargs.setdefault('extra_env', {})['LC_MESSAGES'] = 'C'
+
+    prev_user = self.username
+    if remote_user:
+      self.username = remote_user
+
+    ssh_cmd = self.GetSSHCommand(connect_settings=connect_settings)
+
+    if cmd:
+      ssh_cmd.append('--')
+
+      if remote_sudo and self.username != ROOT_ACCOUNT:
+        # Prepend sudo to cmd.
+        ssh_cmd.append('sudo')
+
+      if isinstance(cmd, six.string_types):
+        if kwargs.get('shell'):
+          ssh_cmd = '%s %s' % (' '.join(ssh_cmd),
+                               cros_build_lib.ShellQuote(cmd))
+        else:
+          ssh_cmd += [cmd]
+      else:
+        ssh_cmd += cmd
+
+    try:
+      return cros_build_lib.run(ssh_cmd, **kwargs)
+    except cros_build_lib.RunCommandError as e:
+      if ((e.result.returncode == SSH_ERROR_CODE and ssh_error_ok) or
+          (e.result.returncode and e.result.returncode != SSH_ERROR_CODE
+           and not check)):
+        return e.result
+      elif e.result.returncode == SSH_ERROR_CODE:
+        raise SSHConnectionError(e.result.error)
+      else:
+        raise
+    finally:
+      # Restore the previous user if we temporarily changed it earlier.
+      self.username = prev_user
+
+  def CreateTunnel(self, to_local=None, to_remote=None, connect_settings=None):
+    """Establishes a SSH tunnel to the remote device as a background process.
+
+    Args:
+      to_local: A list of PortForwardSpec objects to forward from the local
+          machine to the remote machine.
+      to_remote: A list of PortForwardSpec to forward from the remote machine
+          to the local machine.
+      connect_settings: The SSH connect settings to use.
+
+    Returns:
+      A Popen object. Note that it represents an already started background
+      process. Calling poll() on the return value can be used to check that
+      the tunnel is still running. To close the tunnel call terminate().
+    """
+
+    ssh_cmd = self._GetSSHCmd(connect_settings=connect_settings)
+    if to_local is not None:
+      ssh_cmd.extend(
+          token for spec in to_local for token in ('-L',
+                                                   spec.command_line_spec))
+    if to_remote is not None:
+      ssh_cmd.extend(
+          token for spec in to_remote for token in ('-R',
+                                                    spec.command_line_spec))
+    ssh_cmd.append('-N')
+    ssh_cmd.append(self.target_ssh_url)
+
+    logging.log(self.debug_level, '%s', cros_build_lib.CmdToStr(ssh_cmd))
+
+    return RemoteAccess._mockable_popen(ssh_cmd)
+
+  def _GetBootId(self, rebooting=False):
+    """Obtains unique boot session identifier.
+
+    If rebooting is True, uses an SSH connection with a short timeout,
+    which will wait for at most about ten seconds. If the network returns
+    an error (e.g. host unreachable) the delay can be shorter.
+    If rebooting is True and an ssh error occurs, None is returned.
+    """
+    if rebooting:
+      # In tests SSH seems to be waiting rather longer than would be expected
+      # from these parameters. These values produce a ~5 second wait.
+      connect_settings = CompileSSHConnectSettings(
+          ConnectTimeout=REBOOT_SSH_CONNECT_TIMEOUT,
+          ConnectionAttempts=REBOOT_SSH_CONNECT_ATTEMPTS)
+      result = self.RemoteSh(['cat', '/proc/sys/kernel/random/boot_id'],
+                             connect_settings=connect_settings,
+                             check=False, ssh_error_ok=True,
+                             log_output=True)
+      if result.returncode == SSH_ERROR_CODE:
+        return None
+      elif result.returncode == 0:
+        return result.output.rstrip()
+      else:
+        raise Exception('Unexpected error code %s getting boot ID.'
+                        % result.returncode)
+    else:
+      result = self.RemoteSh(['cat', '/proc/sys/kernel/random/boot_id'],
+                             log_output=True)
+      return result.output.rstrip()
+
+  def CheckIfRebooted(self, old_boot_id):
+    """Checks if the remote device has successfully rebooted
+
+    This compares the remote device old and current boot IDs.  If
+    ssh errors occur, the device has likely not booted and False is
+    returned.  Basically only returns True if it is proven that the
+    device has rebooted.  May throw exceptions.
+
+    Returns:
+      True if the device has successfully rebooted, False otherwise.
+    """
+    new_boot_id = self._GetBootId(rebooting=True)
+    if new_boot_id is None:
+      logging.debug('Unable to get new boot_id after reboot from boot_id %s',
+                    old_boot_id)
+      return False
+    elif new_boot_id == old_boot_id:
+      logging.debug('Checking if rebooted from boot_id %s, still running %s',
+                    old_boot_id, new_boot_id)
+      return False
+    else:
+      logging.debug('Checking if rebooted from boot_id %s, now running %s',
+                    old_boot_id, new_boot_id)
+      return True
+
+  def AwaitReboot(self, old_boot_id, timeout_sec=REBOOT_MAX_WAIT):
+    """Await reboot away from old_boot_id.
+
+    Args:
+      old_boot_id: The boot_id that must be transitioned away from for success.
+      timeout_sec: How long to wait for reboot.
+
+    Returns:
+      True if the device has successfully rebooted.
+    """
+    try:
+      timeout_util.WaitForReturnTrue(lambda: self.CheckIfRebooted(old_boot_id),
+                                     timeout_sec, period=CHECK_INTERVAL)
+    except timeout_util.TimeoutError:
+      return False
+    return True
+
+  def RemoteReboot(self, timeout_sec=REBOOT_MAX_WAIT):
+    """Reboot the remote device."""
+    logging.info('Rebooting %s...', self.remote_host)
+    old_boot_id = self._GetBootId()
+    # Use ssh_error_ok=True in the remote shell invocations because the reboot
+    # might kill sshd before the connection completes normally.
+    self.RemoteSh(['reboot'], ssh_error_ok=True, remote_sudo=True)
+    time.sleep(CHECK_INTERVAL)
+    if not self.AwaitReboot(old_boot_id, timeout_sec):
+      cros_build_lib.Die('Reboot has not completed after %s seconds; giving up.'
+                         % (timeout_sec,))
+
+  def Rsync(self, src, dest, to_local=False, follow_symlinks=False,
+            recursive=True, inplace=False, verbose=False, sudo=False,
+            remote_sudo=False, compress=True, **kwargs):
+    """Rsync a path to the remote device.
+
+    Rsync a path to the remote device. If |to_local| is set True, it
+    rsyncs the path from the remote device to the local machine.
+
+    Args:
+      src: The local src directory.
+      dest: The remote dest directory.
+      to_local: If set, rsync remote path to local path.
+      follow_symlinks: If set, transform symlinks into referent
+        path. Otherwise, copy symlinks as symlinks.
+      recursive: Whether to recursively copy entire directories.
+      inplace: If set, cause rsync to overwrite the dest files in place.  This
+        conserves space, but has some side effects - see rsync man page.
+      verbose: If set, print more verbose output during rsync file transfer.
+      sudo: If set, invoke the command via sudo.
+      remote_sudo: If set, run the command in remote shell with sudo.
+      compress: If set, compress file data during the transfer.
+      **kwargs: See cros_build_lib.run documentation.
+    """
+    kwargs.setdefault('debug_level', self.debug_level)
+
+    ssh_cmd = ' '.join(self._GetSSHCmd())
+    rsync_cmd = ['rsync', '--perms', '--verbose', '--times',
+                 '--omit-dir-times', '--exclude', '.svn']
+    rsync_cmd.append('--copy-links' if follow_symlinks else '--links')
+    rsync_sudo = 'sudo' if (
+        remote_sudo and self.username != ROOT_ACCOUNT) else ''
+    rsync_cmd += ['--rsync-path',
+                  'PATH=%s:$PATH %s rsync' % (DEV_BIN_PATHS, rsync_sudo)]
+
+    if verbose:
+      rsync_cmd.append('--progress')
+    if recursive:
+      rsync_cmd.append('--recursive')
+    if inplace:
+      rsync_cmd.append('--inplace')
+    if compress:
+      rsync_cmd.append('--compress')
+    logging.info('Using rsync compression: %s', compress)
+
+    if to_local:
+      rsync_cmd += ['--rsh', ssh_cmd,
+                    '[%s]:%s' % (self.target_ssh_url, src), dest]
+    else:
+      rsync_cmd += ['--rsh', ssh_cmd, src,
+                    '[%s]:%s' % (self.target_ssh_url, dest)]
+
+    rc_func = cros_build_lib.run
+    if sudo:
+      rc_func = cros_build_lib.sudo_run
+    return rc_func(rsync_cmd, print_cmd=verbose, **kwargs)
+
+  def RsyncToLocal(self, *args, **kwargs):
+    """Rsync a path from the remote device to the local machine."""
+    return self.Rsync(*args, to_local=kwargs.pop('to_local', True), **kwargs)
+
+  def Scp(self, src, dest, to_local=False, recursive=True, verbose=False,
+          sudo=False, **kwargs):
+    """Scp a file or directory to the remote device.
+
+    Args:
+      src: The local src file or directory.
+      dest: The remote dest location.
+      to_local: If set, scp remote path to local path.
+      recursive: Whether to recursively copy entire directories.
+      verbose: If set, print more verbose output during scp file transfer.
+      sudo: If set, invoke the command via sudo.
+      remote_sudo: If set, run the command in remote shell with sudo.
+      **kwargs: See cros_build_lib.run documentation.
+
+    Returns:
+      A CommandResult object containing the information and return code of
+      the scp command.
+    """
+    remote_sudo = kwargs.pop('remote_sudo', False)
+    if remote_sudo and self.username != ROOT_ACCOUNT:
+      # TODO: Implement scp with remote sudo.
+      raise NotImplementedError('Cannot run scp with sudo!')
+
+    kwargs.setdefault('debug_level', self.debug_level)
+    # scp relies on 'scp' being in the $PATH of the non-interactive
+    # SSH login shell.
+    scp_cmd = ['scp']
+    if self.port:
+      scp_cmd += ['-P', str(self.port)]
+    scp_cmd += CompileSSHConnectSettings(ConnectTimeout=60)
+    scp_cmd += ['-i', self.private_key]
+
+    if not self.interactive:
+      scp_cmd.append('-n')
+
+    if recursive:
+      scp_cmd.append('-r')
+    if verbose:
+      scp_cmd.append('-v')
+
+    # Check for an IPv6 address
+    if ':' in self.remote_host:
+      target_ssh_url = '%s@[%s]' % (self.username, self.remote_host)
+    else:
+      target_ssh_url = self.target_ssh_url
+
+    if to_local:
+      scp_cmd += ['%s:%s' % (target_ssh_url, src), dest]
+    else:
+      scp_cmd += glob.glob(src) + ['%s:%s' % (target_ssh_url, dest)]
+
+    rc_func = cros_build_lib.run
+    if sudo:
+      rc_func = cros_build_lib.sudo_run
+
+    return rc_func(scp_cmd, print_cmd=verbose, **kwargs)
+
+  def ScpToLocal(self, *args, **kwargs):
+    """Scp a path from the remote device to the local machine."""
+    return self.Scp(*args, to_local=kwargs.pop('to_local', True), **kwargs)
+
+  def PipeToRemoteSh(self, producer_cmd, cmd, **kwargs):
+    """Run a local command and pipe it to a remote sh command over ssh.
+
+    Args:
+      producer_cmd: Command to run locally with its results piped to |cmd|.
+      cmd: Command to run on the remote device.
+      **kwargs: See RemoteSh for documentation.
+    """
+    result = cros_build_lib.run(producer_cmd, print_cmd=False,
+                                capture_output=True)
+    return self.RemoteSh(cmd, input=kwargs.pop('input', result.output),
+                         **kwargs)
+
+
+class RemoteDeviceHandler(object):
+  """A wrapper of RemoteDevice."""
+
+  def __init__(self, *args, **kwargs):
+    """Creates a RemoteDevice object."""
+    self.device = RemoteDevice(*args, **kwargs)
+
+  def __enter__(self):
+    """Return the temporary directory."""
+    return self.device
+
+  def __exit__(self, _type, _value, _traceback):
+    """Cleans up the device."""
+    self.device.Cleanup()
+
+
+class ChromiumOSDeviceHandler(object):
+  """A wrapper of ChromiumOSDevice."""
+
+  def __init__(self, *args, **kwargs):
+    """Creates a RemoteDevice object."""
+    self.device = ChromiumOSDevice(*args, **kwargs)
+
+  def __enter__(self):
+    """Return the temporary directory."""
+    return self.device
+
+  def __exit__(self, _type, _value, _traceback):
+    """Cleans up the device."""
+    self.device.Cleanup()
+
+
+class RemoteDevice(object):
+  """Handling basic SSH communication with a remote device."""
+
+  DEFAULT_BASE_DIR = '/tmp/remote-access'
+
+  def __init__(self, hostname, port=None, username=None,
+               base_dir=DEFAULT_BASE_DIR, connect_settings=None,
+               private_key=None, debug_level=logging.DEBUG, ping=False,
+               connect=True):
+    """Initializes a RemoteDevice object.
+
+    Args:
+      hostname: The hostname of the device.
+      port: The ssh port of the device.
+      username: The ssh login username.
+      base_dir: The base work directory to create on the device, or
+        None. Required in order to use run(), but
+        BaseRunCommand() will be available in either case.
+      connect_settings: Default SSH connection settings.
+      private_key: The identity file to pass to `ssh -i`.
+      debug_level: Setting debug level for logging.
+      ping: Whether to ping the device before attempting to connect.
+      connect: True to set up the connection now; otherwise setup is
+        automatically deferred until the device is first used.
+    """
+    self.hostname = hostname
+    self.port = port
+    self.username = username
+    # The tempdir is for storing the rsa key and/or some temp files.
+    self.tempdir = osutils.TempDir(prefix='ssh-tmp')
+    self.connect_settings = (connect_settings if connect_settings else
+                             CompileSSHConnectSettings())
+    self.private_key = private_key
+    self.debug_level = debug_level
+    # The temporary work directories on the device.
+    self._base_dir = base_dir
+    self._work_dir = None
+    # Use GetAgent() instead of accessing this directly for deferred connect.
+    self._agent = None
+    self.cleanup_cmds = []
+
+    if ping and not self.Pingable():
+      raise DeviceNotPingableError('Device %s is not pingable.' % self.hostname)
+
+    if connect:
+      self._Connect()
+
+  def Pingable(self, timeout=20):
+    """Returns True if the device is pingable.
+
+    Args:
+      timeout: Timeout in seconds (default: 20 seconds).
+
+    Returns:
+      True if the device responded to the ping before |timeout|.
+    """
+    try:
+      addrlist = socket.getaddrinfo(self.hostname, 22)
+    except socket.gaierror:
+      # If the hostname is the name of a "Host" entry in ~/.ssh/config,
+      # it might be ssh-able but not pingable.
+      # If the hostname is truly bogus, ssh will fail immediately, so
+      # we can safely skip the ping step.
+      logging.info('Hostname "%s" not found, falling through to ssh',
+                   self.hostname)
+      return True
+
+    if addrlist[0][0] == socket.AF_INET6:
+      ping_command = 'ping6'
+    else:
+      ping_command = 'ping'
+
+    result = cros_build_lib.run(
+        [ping_command, '-c', '1', '-w', str(timeout), self.hostname],
+        check=False,
+        capture_output=True)
+    return result.returncode == 0
+
+  def GetAgent(self):
+    """Agent accessor; connects the agent if necessary."""
+    if not self._agent:
+      self._Connect()
+    return self._agent
+
+  def _Connect(self):
+    """Sets up the SSH connection and internal state."""
+    self._agent = RemoteAccess(self.hostname, self.tempdir.tempdir,
+                               port=self.port, username=self.username,
+                               private_key=self.private_key)
+
+  @property
+  def work_dir(self):
+    """The work directory to create on the device.
+
+    This property exists so we can create the remote paths on demand.  For
+    some use cases, it'll never be needed, so skipping creation is faster.
+    """
+    if self._base_dir is None:
+      return None
+
+    if self._work_dir is None:
+      self._work_dir = self.BaseRunCommand(
+          ['mkdir', '-p', self._base_dir, '&&',
+           'mktemp', '-d', '--tmpdir=%s' % self._base_dir],
+          capture_output=True).output.strip()
+      logging.debug('The temporary working directory on the device is %s',
+                    self._work_dir)
+      self.RegisterCleanupCmd(['rm', '-rf', self._work_dir])
+
+    return self._work_dir
+
+  def HasProgramInPath(self, binary):
+    """Checks if the given binary exists on the device."""
+    result = self.GetAgent().RemoteSh(
+        ['PATH=%s:$PATH which' % DEV_BIN_PATHS, binary], check=False)
+    return result.returncode == 0
+
+  def HasRsync(self):
+    """Checks if rsync exists on the device."""
+    return self.HasProgramInPath('rsync')
+
+  @memoize.MemoizedSingleCall
+  def HasGigabitEthernet(self):
+    """Checks if the device has a gigabit ethernet port.
+
+    The function checks the device's first ethernet interface (eth0).
+    """
+    result = self.GetAgent().RemoteSh(['ethtool', 'eth0'], check=False,
+                                      capture_output=True)
+    return re.search(r'Speed: \d+000Mb/s', result.output)
+
+  def IsSELinuxAvailable(self):
+    """Check whether the device has SELinux compiled in."""
+    # Note that SELinux can be enabled for some devices that lack SELinux
+    # tools, so we need to check for the existence of the restorecon bin along
+    # with the sysfs check.
+    return (self.HasProgramInPath('restorecon') and
+            self.IfFileExists('/sys/fs/selinux/enforce'))
+
+  def IsSELinuxEnforced(self):
+    """Check whether the device has SELinux-enforced."""
+    if not self.IsSELinuxAvailable():
+      return False
+    return self.CatFile('/sys/fs/selinux/enforce', max_size=None).strip() == '1'
+
+  def RegisterCleanupCmd(self, cmd, **kwargs):
+    """Register a cleanup command to be run on the device in Cleanup().
+
+    Args:
+      cmd: command to run. See RemoteAccess.RemoteSh documentation.
+      **kwargs: keyword arguments to pass along with cmd. See
+        RemoteAccess.RemoteSh documentation.
+    """
+    self.cleanup_cmds.append((cmd, kwargs))
+
+  def Cleanup(self):
+    """Remove work/temp directories and run all registered cleanup commands."""
+    for cmd, kwargs in self.cleanup_cmds:
+      # We want to run through all cleanup commands even if there are errors.
+      kwargs.setdefault('check', False)
+      try:
+        self.BaseRunCommand(cmd, **kwargs)
+      except SSHConnectionError:
+        logging.error('Failed to connect to host in Cleanup, so '
+                      'SSHConnectionError will not be raised.')
+
+    self.tempdir.Cleanup()
+
+  def _CopyToDeviceInParallel(self, src, dest):
+    """Chop source file in chunks, send them to destination in parallel.
+
+    Transfer chunks of file in parallel and assemble in destination if the
+    file size is larger than chunk size. Fall back to scp mode otherwise.
+
+    Args:
+      src: Local path as a string.
+      dest: rsync/scp path of the form <host>:/<path> as a string.
+    """
+    src_filename = os.path.basename(src)
+    chunk_prefix = src_filename + '_'
+    with osutils.TempDir() as tempdir:
+      chunk_path = os.path.join(tempdir, chunk_prefix)
+      try:
+        cmd = ['split', '-b', str(CHUNK_SIZE), src, chunk_path]
+        cros_build_lib.run(cmd)
+        input_list = [[chunk_file, dest, 'scp']
+                      for chunk_file in glob.glob(chunk_path + '*')]
+        parallel.RunTasksInProcessPool(self.CopyToDevice,
+                                       input_list,
+                                       processes=DEGREE_OF_PARALLELISM)
+        logging.info('Assembling these chunks now.....')
+        chunks = '%s/%s*' % (dest, chunk_prefix)
+        final_dest = '%s/%s' % (dest, src_filename)
+        assemble_cmd = ['cat', chunks, '>', final_dest]
+        self.run(assemble_cmd)
+        cleanup_cmd = ['rm', '-f', chunks]
+        self.run(cleanup_cmd)
+      except IOError:
+        logging.error('Could not complete the payload transfer...')
+        raise
+    logging.info('Successfully copied %s to %s in parallel chunks', src, dest)
+
+  def CopyToDevice(self, src, dest, mode, **kwargs):
+    """Copy path to device.
+
+    Args:
+      src: Local path as a string.
+      dest: rsync/scp path of the form <host>:/<path> as a string.
+      mode: must be one of 'rsync', 'scp', or 'parallel'.
+        * Use rsync --compress when copying compressible (factor > 2, text/log)
+        files. This uses quite a bit of CPU but preserves bandwidth.
+        * Use rsync without compression when delta-transferring a whole
+        directory tree which exists at the destination and has changed very
+        little (say a telemetry directory or unpacked stateful or unpacked
+        rootfs). It also often works well for an uncompressed archive copied
+        over a previous copy (which must exist at the destination) that needs
+        only minor updates.
+        * Use scp when the files are incompressible (say already compressed),
+        especially if we know no previous version exists at the destination.
+        * Use parallel when we want to split a large file into chunks and
+        transfer them in parallel for speed, especially over slow networks
+        (congested, long haul, poor SNR).
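+
+    Examples:
+      An illustrative sketch (paths are hypothetical):
+      device.CopyToDevice('/tmp/image.bin', '/usr/local/tmp', mode='scp')
+      device.CopyToDevice('/tmp/logs/', '/tmp/logs', mode='rsync')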
+    """
+    assert mode in ['rsync', 'scp', 'parallel']
+    logging.info('[mode:%s] copy: %s -> %s:%s', mode, src, self.hostname, dest)
+    if mode == 'parallel':
+      # Chop and send chunks in parallel only if the file size is larger than
+      # CHUNK_SIZE.
+      if os.stat(src).st_size > CHUNK_SIZE:
+        self._CopyToDeviceInParallel(src, dest)
+        return
+      else:
+        logging.info('%s is too small for parallelism, fall back to scp', src)
+        mode = 'scp'
+    msg = 'Could not copy %s to device.' % src
+    # Fall back to scp if device has no rsync. Happens when stateful is cleaned.
+    if mode == 'scp' or not self.HasRsync():
+      # scp always follows symlinks.
+      kwargs.pop('follow_symlinks', None)
+      func = self.GetAgent().Scp
+    else:
+      func = self.GetAgent().Rsync
+
+    return RunCommandFuncWrapper(func, msg, src, dest, **kwargs)
+
+  def CopyFromDevice(self, src, dest, mode='scp', **kwargs):
+    """Copy path from device.
+
+    Adding --compress recommended for text like log files.
+
+    Args:
+      src: rsync/scp path of the form <host>:/<path> as a string.
+      dest: Local path as a string.
+      mode: See mode on CopyToDevice.
+    """
+    msg = 'Could not copy %s from device.' % src
+    # Fall back to scp if device has no rsync. Happens when stateful is cleaned.
+    if mode == 'scp' or not self.HasRsync():
+      # scp always follows symlinks.
+      kwargs.pop('follow_symlinks', None)
+      func = self.GetAgent().ScpToLocal
+    else:
+      func = self.GetAgent().RsyncToLocal
+
+    return RunCommandFuncWrapper(func, msg, src, dest, **kwargs)
+
+  def CopyFromWorkDir(self, src, dest, **kwargs):
+    """Copy path from working directory on the device."""
+    return self.CopyFromDevice(os.path.join(self.work_dir, src), dest, **kwargs)
+
+  def CopyToWorkDir(self, src, dest='', **kwargs):
+    """Copy path to working directory on the device."""
+    return self.CopyToDevice(src, os.path.join(self.work_dir, dest), **kwargs)
+
+  def _TestPath(self, path, option, **kwargs):
+    """Tests a given path for specific options."""
+    kwargs.setdefault('check', False)
+    result = self.run(['test', option, path], **kwargs)
+    return result.returncode == 0
+
+  def IfFileExists(self, path, **kwargs):
+    """Check if the given file exists on the device."""
+    return self._TestPath(path, '-f', **kwargs)
+
+  def IfPathExists(self, path, **kwargs):
+    """Check if the given path exists on the device."""
+    return self._TestPath(path, '-e', **kwargs)
+
+  def IsDirWritable(self, path):
+    """Checks if the given directory is writable on the device.
+
+    Args:
+      path: Directory on the device to check.
+    """
+    tmp_file = os.path.join(path, '.tmp.remote_access.is.writable')
+    result = self.GetAgent().RemoteSh(
+        ['touch', tmp_file, '&&', 'rm', tmp_file],
+        check=False, remote_sudo=True, capture_output=True)
+    return result.returncode == 0
+
+  def IsFileExecutable(self, path):
+    """Check if the given file is executable on the device.
+
+    Args:
+      path: full path to the file on the device to check.
+
+    Returns:
+      True if the file is executable, and False if the file does not exist or
+      is not executable.
+    """
+    cmd = ['test', '-f', path, '-a', '-x', path,]
+    result = self.GetAgent().RemoteSh(cmd, remote_sudo=True, check=False,
+                                      capture_output=True)
+    return result.returncode == 0
+
+  def GetSize(self, path):
+    """Gets the size of the given file on the device.
+
+    Args:
+      path: full path to the file on the device.
+
+    Returns:
+      Size of the file in number of bytes.
+
+    Raises:
+      ValueError if failed to get file size from the remote output.
+      cros_build_lib.RunCommandError if |path| does not exist or the remote
+      command to get file size has failed.
+    """
+    cmd = ['du', '-Lb', '--max-depth=0', path]
+    result = self.BaseRunCommand(cmd, remote_sudo=True, capture_output=True)
+    return int(result.output.split()[0])
+
+  def CatFile(self, path, max_size=1000000):
+    """Reads the file on device to string if its size is less than |max_size|.
+
+    Args:
+      path: The full path to the file on the device to read.
+      max_size: Read the file only if its size is less than |max_size| in bytes.
+        If None, do not check its size and always cat the path.
+
+    Returns:
+      A string of the file content.
+
+    Raises:
+      CatFileError if failed to read the remote file or the file size is larger
+      than |max_size|.
+    """
+    if max_size is not None:
+      try:
+        file_size = self.GetSize(path)
+      except (ValueError, cros_build_lib.RunCommandError) as e:
+        raise CatFileError('Failed to get size of file "%s": %s' % (path, e))
+      if file_size > max_size:
+        raise CatFileError('File "%s" is larger than %d bytes' %
+                           (path, max_size))
+
+    result = self.BaseRunCommand(['cat', path], remote_sudo=True,
+                                 check=False, capture_output=True)
+    if result.returncode:
+      raise CatFileError('Failed to read file "%s" on the device' % path)
+    return result.output
+
+  def DeletePath(self, path, relative_to_work_dir=False, recursive=False):
+    """Deletes a path on the remote device.
+
+    Args:
+      path: The path on the remote device that should be deleted.
+      relative_to_work_dir: If true, the path is relative to |self.work_dir|.
+      recursive: If true, the |path| is deleted recursively.
+
+    Raises:
+      cros_build_lib.RunCommandError if |path| does not exist or the remote
+      command to delete the |path| has failed.
+    """
+    if relative_to_work_dir:
+      path = os.path.join(self.work_dir, path)
+
+    cmd = ['rm', '-f']
+    if recursive:
+      cmd += ['-r']
+    cmd += [path]
+
+    self.run(cmd)
+
+  def PipeOverSSH(self, filepath, cmd, **kwargs):
+    """Cat a file and pipe over SSH."""
+    producer_cmd = ['cat', filepath]
+    return self.GetAgent().PipeToRemoteSh(producer_cmd, cmd, **kwargs)
+
+  def GetRunningPids(self, exe, full_path=True):
+    """Get all the running pids on the device with the executable path.
+
+    Args:
+      exe: The executable path to get pids for.
+      full_path: Whether |exe| is a full executable path.
+
+    Raises:
+      RunningPidsError when failing to parse out pids from command output.
+      SSHConnectionError when error occurs during SSH connection.
+    """
+    try:
+      cmd = ['pgrep', exe]
+      if full_path:
+        cmd.append('-f')
+      result = self.GetAgent().RemoteSh(cmd, check=False,
+                                        capture_output=True)
+      try:
+        return [int(pid) for pid in result.output.splitlines()]
+      except ValueError:
+        logging.error('Parsing output failed:\n%s', result.output)
+        raise RunningPidsError('Unable to get running pids of %s' % exe)
+    except SSHConnectionError:
+      logging.error('Error connecting to device %s', self.hostname)
+      raise
+
+  def Reboot(self, timeout_sec=REBOOT_MAX_WAIT):
+    """Reboot the device."""
+    return self.GetAgent().RemoteReboot(timeout_sec=timeout_sec)
+
+  # TODO(vapier): Delete this shim once chromite & users migrate.
+  def BaseRunCommand(self, cmd, **kwargs):
+    """Backwards compat API."""
+    return self.base_run(cmd, **kwargs)
+
+  def base_run(self, cmd, **kwargs):
+    """Executes a shell command on the device with output captured by default.
+
+    Args:
+      cmd: command to run. See RemoteAccess.RemoteSh documentation.
+      **kwargs: keyword arguments to pass along with cmd. See
+        RemoteAccess.RemoteSh documentation.
+    """
+    kwargs.setdefault('debug_level', self.debug_level)
+    kwargs.setdefault('connect_settings', self.connect_settings)
+    try:
+      return self.GetAgent().RemoteSh(cmd, **kwargs)
+    except SSHConnectionError:
+      logging.error('Error connecting to device %s', self.hostname)
+      raise
+
+  def run(self, cmd, **kwargs):
+    """Executes a shell command on the device with output captured by default.
+
+    Also sets environment variables using dictionary provided by
+    keyword argument |extra_env|.
+
+    Args:
+      cmd: command to run. See RemoteAccess.RemoteSh documentation.
+      **kwargs: keyword arguments to pass along with cmd. See
+        RemoteAccess.RemoteSh documentation.
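+
+    Examples:
+      An illustrative sketch (assuming |device| is a RemoteDevice):
+      device.run(['printenv', 'FOO'], extra_env={'FOO': 'bar'})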
+    """
+    # Handle setting environment variables on the device by copying
+    # and sourcing a temporary environment file.
+    extra_env = kwargs.pop('extra_env', None)
+    if extra_env:
+      remote_sudo = kwargs.pop('remote_sudo', False)
+      if remote_sudo and self.GetAgent().username == ROOT_ACCOUNT:
+        remote_sudo = False
+
+      new_cmd = []
+      flat_vars = ['%s=%s' % (k, cros_build_lib.ShellQuote(v))
+                   for k, v in extra_env.items()]
+
+      # If the vars are too large for the command line, do it indirectly.
+      # We pick 32k somewhat arbitrarily -- the kernel should accept this
+      # and rarely should remote commands get near that size.
+      ARG_MAX = 32 * 1024
+
+      # What the command line would generally look like on the remote.
+      if isinstance(cmd, six.string_types):
+        if not kwargs.get('shell', False):
+          raise ValueError("'shell' must be True when 'cmd' is a string.")
+        cmdline = ' '.join(flat_vars) + ' ' + cmd
+      else:
+        if kwargs.get('shell', False):
+          raise ValueError("'shell' must be False when 'cmd' is a list.")
+        cmdline = ' '.join(flat_vars + cmd)
+      if len(cmdline) > ARG_MAX:
+        env_list = ['export %s' % x for x in flat_vars]
+        with tempfile.NamedTemporaryFile(dir=self.tempdir.tempdir,
+                                         prefix='env') as f:
+          logging.debug('Environment variables: %s', ' '.join(env_list))
+          osutils.WriteFile(f.name, '\n'.join(env_list))
+          self.CopyToWorkDir(f.name)
+          env_file = os.path.join(self.work_dir, os.path.basename(f.name))
+          new_cmd += ['.', '%s;' % env_file]
+          if remote_sudo:
+            new_cmd += ['sudo', '-E']
+      else:
+        if remote_sudo:
+          new_cmd += ['sudo']
+        new_cmd += flat_vars
+
+      if isinstance(cmd, six.string_types):
+        cmd = ' '.join(new_cmd) + ' ' + cmd
+      else:
+        cmd = new_cmd + cmd
+
+    return self.BaseRunCommand(cmd, **kwargs)
+
+  def CheckIfRebooted(self, old_boot_id):
+    """Checks if the remote device has successfully rebooted
+
+    This compares the remote device old and current boot IDs.  If
+    ssh errors occur, the device has likely not booted and False is
+    returned.  Basically only returns True if it is proven that the
+    device has rebooted.  May throw exceptions.
+
+    Returns:
+      True if the device has successfully rebooted, False otherwise.
+    """
+    return self.GetAgent().CheckIfRebooted(old_boot_id)
+
+  def AwaitReboot(self, old_boot_id):
+    """Await reboot away from old_boot_id.
+
+    Args:
+      old_boot_id: The boot_id that must be transitioned away from for success.
+
+    Returns:
+      True if the device has successfully rebooted.
+    """
+    return self.GetAgent().AwaitReboot(old_boot_id)
+
+
+class ChromiumOSDevice(RemoteDevice):
+  """Basic commands to interact with a ChromiumOS device over SSH connection."""
+
+  MAKE_DEV_SSD_BIN = '/usr/share/vboot/bin/make_dev_ssd.sh'
+  MOUNT_ROOTFS_RW_CMD = ['mount', '-o', 'remount,rw', '/']
+  LIST_MOUNTS_CMD = ['cat', '/proc/mounts']
+
+  def __init__(self, hostname, include_dev_paths=True, **kwargs):
+    """Initializes this object.
+
+    Args:
+      hostname: A network hostname.
+      include_dev_paths: If true, add DEV_BIN_PATHS to $PATH for all commands.
+      kwargs: Args to pass to the parent constructor.
+    """
+    super(ChromiumOSDevice, self).__init__(hostname, **kwargs)
+    self._orig_path = None
+    self._path = None
+    self._include_dev_paths = include_dev_paths
+    self._lsb_release = {}
+
+  @property
+  def orig_path(self):
+    """The $PATH variable on the device."""
+    if not self._orig_path:
+      try:
+        result = self.BaseRunCommand(['echo', '${PATH}'])
+      except cros_build_lib.RunCommandError as e:
+        logging.error('Failed to get $PATH on the device: %s', e.result.error)
+        raise
+
+      self._orig_path = result.output.strip()
+
+    return self._orig_path
+
+  @property
+  def path(self):
+    """The $PATH variable on the device prepended with DEV_BIN_PATHS."""
+    if not self._path:
+      # If the remote path already has our dev paths (which is common), then
+      # there is no need for us to prepend.
+      orig_paths = self.orig_path.split(':')
+      for path in reversed(DEV_BIN_PATHS.split(':')):
+        if path not in orig_paths:
+          orig_paths.insert(0, path)
+
+      self._path = ':'.join(orig_paths)
+
+    return self._path
+
+  @property
+  def lsb_release(self):
+    """The /etc/lsb-release content on the device.
+
+    Returns a dict of entries in /etc/lsb-release file. If multiple entries
+    have the same key, only the first entry is recorded. Returns an empty dict
+    if the reading command failed or the file is corrupted (i.e., does not have
+    the format of <key>=<value> for every line).
+    """
+    if not self._lsb_release:
+      try:
+        content = self.CatFile(constants.LSB_RELEASE_PATH, max_size=None)
+      except CatFileError as e:
+        logging.debug(
+            'Failed to read "%s" on the device: %s',
+            constants.LSB_RELEASE_PATH, e)
+      else:
+        try:
+          self._lsb_release = dict(line.split('=', 1)
+                                   for line in reversed(content.splitlines()))
+        except ValueError:
+          logging.error('File "%s" on the device is mal-formatted.',
+                        constants.LSB_RELEASE_PATH)
+
+    return self._lsb_release
+
+  @property
+  def board(self):
+    """The board name of the device."""
+    return self.lsb_release.get(cros_set_lsb_release.LSB_KEY_BOARD, '')
+
+  @property
+  def version(self):
+    """The OS version of the device."""
+    return self.lsb_release.get(cros_set_lsb_release.LSB_KEY_VERSION, '')
+
+  @property
+  def app_id(self):
+    """The App ID of the device."""
+    return self.lsb_release.get(cros_set_lsb_release.LSB_KEY_APPID_RELEASE, '')
+
+  def _RemountRootfsAsWritable(self):
+    """Attempts to Remount the root partition."""
+    logging.info("Remounting '/' with rw...")
+    self.run(self.MOUNT_ROOTFS_RW_CMD, check=False, remote_sudo=True)
+
+  def _RootfsIsReadOnly(self):
+    """Returns True if rootfs on is mounted as read-only."""
+    r = self.run(self.LIST_MOUNTS_CMD, capture_output=True)
+    for line in r.output.splitlines():
+      if not line:
+        continue
+
+      chunks = line.split()
+      if chunks[1] == '/' and 'ro' in chunks[3].split(','):
+        return True
+
+    return False
+
+  def DisableRootfsVerification(self):
+    """Disables device rootfs verification."""
+    logging.info('Disabling rootfs verification on device...')
+    self.run(
+        [self.MAKE_DEV_SSD_BIN, '--remove_rootfs_verification', '--force'],
+        check=False, remote_sudo=True)
+    # TODO(yjhong): Make sure an update is not pending.
+    logging.info('Need to reboot to actually disable the verification.')
+    self.Reboot()
+    # After reboot, the rootfs is mounted read-only, so remount as read-write.
+    self._RemountRootfsAsWritable()
+
+  def MountRootfsReadWrite(self):
+    """Checks mount types and remounts them as read-write if needed.
+
+    Returns:
+      True if rootfs is mounted as read-write. False otherwise.
+    """
+    if not self._RootfsIsReadOnly():
+      return True
+
+    # If the image on the device is built with rootfs verification
+    # disabled, we can simply remount '/' as read-write.
+    self._RemountRootfsAsWritable()
+
+    if not self._RootfsIsReadOnly():
+      return True
+
+    logging.info('Unable to remount rootfs as rw (normal w/verified rootfs).')
+    # If the image is built with rootfs verification, turn it off.
+    self.DisableRootfsVerification()
+
+    return not self._RootfsIsReadOnly()
+
+  def run(self, cmd, **kwargs):
+    """Executes a shell command on the device with output captured by default.
+
+    Also makes sure $PATH is set correctly by adding DEV_BIN_PATHS to
+    'PATH' in |extra_env| if self._include_dev_paths is True.
+
+    Args:
+      cmd: command to run. See RemoteAccess.RemoteSh documentation.
+      **kwargs: keyword arguments to pass along with cmd. See
+        RemoteAccess.RemoteSh documentation.
+    """
+    if self._include_dev_paths:
+      extra_env = kwargs.pop('extra_env', {})
+      extra_env.setdefault('PATH', self.path)
+      kwargs['extra_env'] = extra_env
+    return super(ChromiumOSDevice, self).run(cmd, **kwargs)
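+
+
+# Illustrative usage sketch (the hostname and commands below are hypothetical
+# and assume a reachable test device):
+#
+#   device = ChromiumOSDevice('dut-hostname')
+#   result = device.run(['uname', '-r'])  # DEV_BIN_PATHS prepended to $PATH
+#   device.run('echo "$FOO"', shell=True, extra_env={'FOO': 'bar'})
+#   print(device.board, device.version)   # parsed from /etc/lsb-release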
diff --git a/utils/frozen_chromite/lib/results_lib.py b/utils/frozen_chromite/lib/results_lib.py
new file mode 100644
index 0000000..8ad4b81
--- /dev/null
+++ b/utils/frozen_chromite/lib/results_lib.py
@@ -0,0 +1,313 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Classes for collecting results of our BuildStages as they run."""
+
+from __future__ import print_function
+
+import collections
+import datetime
+import math
+import os
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import failures_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+
+def _GetCheckpointFile(buildroot):
+  return os.path.join(buildroot, '.completed_stages')
+
+
+def WriteCheckpoint(buildroot):
+  """Drops a completed stages file with current state."""
+  completed_stages_file = _GetCheckpointFile(buildroot)
+  with open(completed_stages_file, 'w+') as save_file:
+    Results.SaveCompletedStages(save_file)
+
+
+def LoadCheckpoint(buildroot):
+  """Restore completed stage info from checkpoint file."""
+  completed_stages_file = _GetCheckpointFile(buildroot)
+  if not os.path.exists(completed_stages_file):
+    logging.warning('Checkpoint file not found in buildroot %s', buildroot)
+    return
+
+  with open(completed_stages_file, 'r') as load_file:
+    Results.RestoreCompletedStages(load_file)
+
+
+class RecordedTraceback(object):
+  """This class represents a traceback recorded in the list of results."""
+
+  def __init__(self, failed_stage, failed_prefix, exception, traceback):
+    """Construct a RecordedTraceback object.
+
+    Args:
+      failed_stage: The stage that failed during the build. E.g., HWTest [bvt]
+      failed_prefix: The prefix of the stage that failed. E.g., HWTest
+      exception: The raw exception object.
+      traceback: The full stack trace for the failure, as a string.
+    """
+    self.failed_stage = failed_stage
+    self.failed_prefix = failed_prefix
+    self.exception = exception
+    self.traceback = traceback
+
+
+_result_fields = ['name', 'result', 'description', 'prefix', 'board', 'time']
+Result = collections.namedtuple('Result', _result_fields)
+
+
+class _Results(object):
+  """Static class that collects the results of our BuildStages as they run."""
+
+  SUCCESS = 'Stage was successful'
+  FORGIVEN = 'Stage failed but was optional'
+  SKIPPED = 'Stage was skipped'
+  NON_FAILURE_TYPES = (SUCCESS, FORGIVEN, SKIPPED)
+
+  SPLIT_TOKEN = r'\_O_/'
+
+  def __init__(self):
+    # List of results for all stages that is built up as we run. Members are
+    # of the form ('name', SUCCESS | FORGIVEN | Exception, None | description).
+    self._results_log = []
+
+    # A list of failure_message_lib.StageFailureMessage instances representing
+    # the exceptions thrown by failed stages.
+    self._failure_message_results = []
+
+    # Stages run in a previous run and restored. Stored as a dictionary of
+    # names to previous records.
+    self._previous = {}
+
+    self.start_time = datetime.datetime.now()
+
+  def Clear(self):
+    """Clear existing stage results."""
+    self.__init__()
+
+  def PreviouslyCompletedRecord(self, name):
+    """Check to see if this stage was previously completed.
+
+    Returns:
+      The stage's record from the previous run if it completed successfully
+      then, otherwise None.
+    """
+    return self._previous.get(name)
+
+  def BuildSucceededSoFar(self, buildstore=None, buildbucket_id=None,
+                          name=None):
+    """Return true if all stages so far have passing states.
+
+    This method returns true if every stage so far was successful, forgiven,
+    or skipped.
+
+    Args:
+      buildstore: A BuildStore instance to make DB calls.
+      buildbucket_id: buildbucket_id of the build to check.
+      name: stage name of current stage.
+    """
+    build_success = all(entry.result in self.NON_FAILURE_TYPES
+                        for entry in self._results_log)
+
+    # When a timeout happens and background tasks are killed, the statuses
+    # of the background stage tasks may get lost. BuildSucceededSoFar may
+    # still return build_success = True even though the killed stage tasks
+    # failed. Add one more verification step in _BuildSucceededFromCIDB to
+    # check the stage status in CIDB.
+    return (build_success and
+            self._BuildSucceededFromCIDB(buildstore=buildstore,
+                                         buildbucket_id=buildbucket_id,
+                                         name=name))
+
+  def _BuildSucceededFromCIDB(self, buildstore=None, buildbucket_id=None,
+                              name=None):
+    """Return True if all stages recorded in buildbucket have passing states.
+
+    Args:
+      buildstore: A BuildStore instance to make DB calls.
+      buildbucket_id: buildbucket_id of the build to check.
+      name: stage name of current stage.
+    """
+    if (buildstore is not None and buildstore.AreClientsReady()
+        and buildbucket_id is not None):
+      stages = buildstore.GetBuildsStages(buildbucket_ids=[buildbucket_id])
+      for stage in stages:
+        if name is not None and stage['name'] == name:
+          logging.info("Ignore status of %s as it's the current stage.",
+                       stage['name'])
+          continue
+        if stage['status'] not in constants.BUILDER_NON_FAILURE_STATUSES:
+          logging.warning('Failure in previous stage %s with status %s.',
+                          stage['name'], stage['status'])
+          return False
+
+    return True
+
+  def StageHasResults(self, name):
+    """Return true if stage has posted results."""
+    return name in [entry.name for entry in self._results_log]
+
+  def _RecordStageFailureMessage(self, name, exception, prefix=None,
+                                 build_stage_id=None):
+    self._failure_message_results.append(
+        failures_lib.GetStageFailureMessageFromException(
+            name, build_stage_id, exception, stage_prefix_name=prefix))
+
+  def Record(self, name, result, description=None, prefix=None, board='',
+             time=0, build_stage_id=None):
+    """Store off an additional stage result.
+
+    Args:
+      name: The name of the stage (e.g. HWTest [bvt])
+      result:
+        Result should be one of:
+          Results.SUCCESS if the stage was successful.
+          Results.SKIPPED if the stage was skipped.
+          Results.FORGIVEN if the stage had warnings.
+          Otherwise, it should be the exception stage errored with.
+      description:
+        The textual backtrace of the exception, or None
+      prefix: The prefix of the stage (e.g. HWTest). Defaults to
+        the value of name.
+      board: The board associated with the stage, if any. Defaults to ''.
+      time: How long the result took to complete.
+      build_stage_id: The id of the failed build stage to record, default to
+        None.
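+
+    Examples:
+      A minimal sketch (stage names, exception, and timings are made up):
+        Results.Record('BuildPackages', Results.SUCCESS, time=120)
+        Results.Record('HWTest [bvt]', error, 'traceback text',
+                       prefix='HWTest', time=300)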
+    """
+    if prefix is None:
+      prefix = name
+
+    # Convert exception to stage_failure_message and record it.
+    if isinstance(result, BaseException):
+      self._RecordStageFailureMessage(name, result, prefix=prefix,
+                                      build_stage_id=build_stage_id)
+
+    result = Result(name, result, description, prefix, board, time)
+    self._results_log.append(result)
+
+  def GetStageFailureMessage(self):
+    return self._failure_message_results
+
+  def Get(self):
+    """Fetch stage results.
+
+    Returns:
+      A list with one entry per stage run with a result.
+    """
+    return self._results_log
+
+  def GetPrevious(self):
+    """Fetch stage results.
+
+    Returns:
+      A list of stages names that were completed in a previous run.
+    """
+    return self._previous
+
+  def SaveCompletedStages(self, out):
+    """Save the successfully completed stages to the provided file |out|."""
+    for entry in self._results_log:
+      if entry.result != self.SUCCESS:
+        break
+      out.write(self.SPLIT_TOKEN.join(str(x) for x in entry) + '\n')
+
+  def RestoreCompletedStages(self, out):
+    """Load the successfully completed stages from the provided file |out|."""
+    # Read the file, and strip off the newlines.
+    for line in out:
+      record = line.strip().split(self.SPLIT_TOKEN)
+      if len(record) != len(_result_fields):
+        logging.warning('State file does not match expected format, ignoring.')
+        # Wipe any partial state.
+        self._previous = {}
+        break
+
+      self._previous[record[0]] = Result(*record)
+
+  def GetTracebacks(self):
+    """Get a list of the exceptions that failed the build.
+
+    Returns:
+      A list of RecordedTraceback objects.
+    """
+    tracebacks = []
+    for entry in self._results_log:
+      # If entry.result is not in NON_FAILURE_TYPES, then the stage failed, and
+      # entry.result is the exception object and entry.description is a string
+      # containing the full traceback.
+      if entry.result not in self.NON_FAILURE_TYPES:
+        traceback = RecordedTraceback(entry.name, entry.prefix, entry.result,
+                                      entry.description)
+        tracebacks.append(traceback)
+    return tracebacks
+
+  def Report(self, out, current_version=None):
+    """Generate a user friendly text display of the results data.
+
+    Args:
+      out: Output stream to write to (e.g. sys.stdout).
+      current_version: Chrome OS version associated with this report.
+    """
+    results = self._results_log
+
+    line = '*' * 60 + '\n'
+    edge = '*' * 2
+
+    if current_version:
+      out.write(line)
+      out.write(edge +
+                ' RELEASE VERSION: ' +
+                current_version +
+                '\n')
+
+    out.write(line)
+    out.write(edge + ' Stage Results\n')
+    warnings = False
+
+    for entry in results:
+      name, result, run_time = (entry.name, entry.result, entry.time)
+      timestr = datetime.timedelta(seconds=math.ceil(run_time))
+
+      # Don't print data on skipped stages.
+      if result == self.SKIPPED:
+        continue
+
+      out.write(line)
+      details = ''
+      if result == self.SUCCESS:
+        status = 'PASS'
+      elif result == self.FORGIVEN:
+        status = 'FAILED BUT FORGIVEN'
+        warnings = True
+      else:
+        status = 'FAIL'
+        if isinstance(result, cros_build_lib.RunCommandError):
+          # If there was a run error, give just the command that failed, not
+          # its full argument list, since those are usually too long.
+          details = ' in %s' % result.result.cmd[0]
+        elif isinstance(result, failures_lib.BuildScriptFailure):
+          # BuildScriptFailure errors publish a 'short' name of the
+          # command that failed.
+          details = ' in %s' % result.shortname
+        else:
+          # There was a normal error. Give the type of exception.
+          details = ' with %s' % type(result).__name__
+
+      out.write('%s %s %s (%s)%s\n' % (edge, status, name, timestr, details))
+
+    out.write(line)
+
+    for x in self.GetTracebacks():
+      if x.failed_stage and x.traceback:
+        out.write('\nFailed in stage %s:\n\n' % x.failed_stage)
+        out.write(x.traceback)
+        out.write('\n')
+
+    if warnings:
+      logging.PrintBuildbotStepWarnings(out)
+
+
+Results = _Results()
diff --git a/utils/frozen_chromite/lib/retry_stats.py b/utils/frozen_chromite/lib/retry_stats.py
new file mode 100644
index 0000000..52aab97
--- /dev/null
+++ b/utils/frozen_chromite/lib/retry_stats.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Infrastructure for collecting statistics about retries."""
+
+from __future__ import print_function
+
+import collections
+import datetime
+
+from autotest_lib.utils.frozen_chromite.lib import parallel
+from autotest_lib.utils.frozen_chromite.lib import retry_util
+
+
+# Well known categories we gather stats for.
+CIDB = 'CIDB'
+GSUTIL = 'Google Storage'
+
+
+class UnconfiguredStatsCategory(Exception):
+  """We tried to use a Stats Category without configuring it."""
+
+
+# Create one of these for each retry call.
+#   attempts: a list of all attempts to perform the action.
+StatEntry = collections.namedtuple(
+    'StatEntry',
+    ('category', 'attempts'))
+
+# Create one of these for each attempt to call the function.
+#  time: The time for this attempt in seconds.
+#  exception: None for a successful attempt, or a string exception description.
+Attempt = collections.namedtuple(
+    'Attempt',
+    ('time', 'exception'))
+
+
+# After Setup, contains a multiprocess proxy array.
+# The array holds StatEntry values for each event seen.
+_STATS_COLLECTION = None
+
+
+def SetupStats():
+  """Prepare a given category to collect stats.
+
+  This must be called BEFORE any new processes that might read or write to
+  these stat values are created. It is safe to call this more than once,
+  but most efficient to only make a single call.
+  """
+  # Pylint thinks our manager has no members.
+  m = parallel.Manager()
+
+  # pylint: disable=global-statement
+  # Create a new stats collection structure that is multiprocess usable.
+  global _STATS_COLLECTION
+  _STATS_COLLECTION = m.list()
+
+
+def _SuccessFilter(entry):
+  """Returns True if the StatEntry succeeded (perhaps after retries)."""
+  # If all attempts contain an exception, they all failed.
+  return not all(a.exception for a in entry.attempts)
+
+
+def _RetryCount(entry):
+  """Returns the number of retries in this StatEntry."""
+  # Every attempt after the first one is a retry.
+  return max(len(entry.attempts) - 1, 0)
+
+
+def CategoryStats(category):
+  """Return stats numbers for a given category.
+
+  success is the number of times a given command succeeded, even if it had to be
+  retried.
+
+  failure is the number of times we exhausted all retries without success.
+
+  retry is the total number of times we retried a command, unrelated to eventual
+  success or failure.
+
+  Args:
+    category: A string that defines the 'namespace' for these stats.
+
+  Returns:
+    success, failure, retry values as integers.
+  """
+  # Convert the multiprocess proxy list into a local simple list.
+  local_stats_collection = list(_STATS_COLLECTION)
+
+  # Extract the values for the category we care about.
+  stats = [e for e in local_stats_collection if e.category == category]
+
+  success = len([e for e in stats if _SuccessFilter(e)])
+  failure = len(stats) - success
+  retry = sum([_RetryCount(e) for e in stats])
+
+  return success, failure, retry
+
+def ReportCategoryStats(out, category):
+  """Dump stats reports for a given category.
+
+  Args:
+    out: Output stream to write to (e.g. sys.stdout).
+    category: A string that defines the 'namespace' for these stats.
+  """
+  success, failure, retry = CategoryStats(category)
+
+  line = '*' * 60 + '\n'
+  edge = '*' * 2
+
+  out.write(line)
+  out.write(edge + ' Performance Statistics for %s' % category + '\n')
+  out.write(edge + '\n')
+  out.write(edge + ' Success: %d' % success + '\n')
+  out.write(edge + ' Failure: %d' % failure + '\n')
+  out.write(edge + ' Retries: %d' % retry + '\n')
+  out.write(edge + ' Total: %d' % (success + failure) + '\n')
+  out.write(line)
+
+
+def ReportStats(out):
+  """Dump stats reports for a given category.
+
+  Args:
+    out: Output stream to write to (e.g. sys.stdout).
+    category: A string that defines the 'namespace' for these stats.
+  """
+  categories = sorted(set(e.category for e in _STATS_COLLECTION))
+
+  for category in categories:
+    ReportCategoryStats(out, category)
+
+
+def RetryWithStats(category, handler, max_retry, functor, *args, **kwargs):
+  """Wrapper around retry_util.GenericRetry that collects stats.
+
+  This wrapper collects statistics about each failure or retry. Each
+  category is defined by a unique string. Each category should be setup
+  before use (actually, before processes are forked).
+
+  All other arguments are blindly passed to retry_util.GenericRetry.
+
+  Args:
+    category: A string that defines the 'namespace' for these stats.
+    handler: See retry_util.GenericRetry.
+    max_retry: See retry_util.GenericRetry.
+    functor: See retry_util.GenericRetry.
+    args: See retry_util.GenericRetry.
+    kwargs: See retry_util.GenericRetry.
+
+  Returns:
+    Whatever retry_util.GenericRetry returns.
+
+  Raises:
+    See retry_util.GenericRetry raises.
+  """
+  statEntry = StatEntry(category, attempts=[])
+
+  # Wrap the work method, so we can gather info.
+  def wrapper(*args, **kwargs):
+    start = datetime.datetime.now()
+
+    try:
+      result = functor(*args, **kwargs)
+    except Exception as e:
+      end = datetime.datetime.now()
+      e_description = '%s: %s' % (type(e).__name__, e)
+      statEntry.attempts.append(Attempt(end - start, e_description))
+      raise
+
+    end = datetime.datetime.now()
+    statEntry.attempts.append(Attempt(end - start, None))
+    return result
+
+  try:
+    return retry_util.GenericRetry(handler, max_retry, wrapper,
+                                   *args, **kwargs)
+  finally:
+    if _STATS_COLLECTION is not None:
+      _STATS_COLLECTION.append(statEntry)
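+
+
+# Illustrative workflow sketch (the retry handler and fetch function below are
+# hypothetical):
+#
+#   import sys
+#   SetupStats()                 # before any worker processes are forked
+#   RetryWithStats(GSUTIL, _ShouldRetry, 3, _FetchFile, 'gs://bucket/obj')
+#   ReportStats(sys.stdout)      # per-category success/failure/retry summary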
diff --git a/utils/frozen_chromite/lib/retry_util.py b/utils/frozen_chromite/lib/retry_util.py
new file mode 100644
index 0000000..24e907c
--- /dev/null
+++ b/utils/frozen_chromite/lib/retry_util.py
@@ -0,0 +1,430 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Basic infrastructure for implementing retries."""
+
+from __future__ import print_function
+
+import functools
+import random
+import re
+import sys
+import time
+
+import six
+
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+
+
+# Match stderr of curl's --fail option to see HTTP status code.
+CURL_STATUS_RE = re.compile(br'The requested URL returned error: (\d+) ')
+
+
+def _CreateExceptionRetryHandler(exception):
+  """Returns a retry handler for given exception(s).
+
+  Please see WithRetry class document for details.
+  """
+  if not (isinstance(exception, type) and issubclass(exception, Exception) or
+          (isinstance(exception, tuple) and
+           all(issubclass(e, Exception) for e in exception))):
+    raise TypeError('exceptions should be an exception (or tuple), not %r' %
+                    exception)
+  return lambda exc: isinstance(exc, exception)
+
+
+class _RetryDelayStrategy(object):
+  """The strategy of the delay between each retry attempts.
+
+  Please see WithRetry class document for details.
+  """
+
+  def __init__(self, sleep=0, backoff_factor=1, jitter=0):
+    if sleep < 0:
+      raise ValueError('sleep must be >= 0: %s' % sleep)
+
+    if backoff_factor < 1:
+      raise ValueError('backoff_factor must be 1 or greater: %s'
+                       % backoff_factor)
+
+    if jitter < 0:
+      raise ValueError('jitter must be >= 0: %s' % jitter)
+
+    self._sleep = sleep
+    self._backoff_factor = backoff_factor
+    self._jitter = jitter
+
+  def Sleep(self, attempt):
+    """Sleep to delay the current retry."""
+    assert attempt >= 1, 'Expect attempt is always positive: %s' % attempt
+    if self._backoff_factor > 1:
+      sleep_duration = self._sleep * self._backoff_factor ** (attempt - 1)
+    else:
+      sleep_duration = self._sleep * attempt
+
+    # If |jitter| is set, add a random jitter sleep.
+    jitter = random.uniform(.5 * self._jitter, 1.5 * self._jitter)
+    total = sleep_duration + jitter
+    if total:
+      logging.debug('Retrying in %f (%f + jitter %f) seconds ...',
+                    total, sleep_duration, jitter)
+      time.sleep(total)
+
+
+class WithRetry(object):
+  """Decorator to handle retry on exception.
+
+  Examples:
+    @WithRetry(max_retry=3)
+    def _run():
+      ... do something ...
+    _run()
+
+    If _run() raises an exception, it retries at most three times.
+
+  Retrying strategy.
+
+  If the decorated function throws an Exception instance, then this class
+  checks whether the retry should be continued or not based on the given
+  |handler| or |exception| as follows.
+  - If |handler| is given, which should be a callback which takes an exception
+    and returns bool, calls it with the thrown exception.
+    If the |handler| returns True, retry will be continued. Otherwise no
+    further retry will be made, and an exception will be raised.
+  - If |exception| is given, which is an exception class or a tuple of
+    exception classes, retry continues iff the thrown exception is an
+    instance of the given exception class(es) (or a subclass). Otherwise no
+    further retry will be made, and an exception will be raised.
+  - If neither is given, just continues to retry on any Exception instance.
+  - Note: it is not allowed to specify both |handler| and |exception| at once.
+
+  Delay strategy.
+
+  Between attempts, a delay can be set, as follows.
+  - If |sleep| is given, the delay between the first and second attempts is
+    |sleep| secs.
+  - The delay between the second and third attempts, and later, depends on
+    |sleep| and |backoff_factor|.
+    - If |backoff_factor| is not given, the delay will be linearly increased,
+      as |sleep| * (number of attempts). E.g., if |sleep| is 1, the delays
+      will be 1, 2, 3, 4, 5, ... and so on.
+    - If |backoff_factor| is given, the delay will be exponentially increased,
+      as |sleep| * |backoff_factor| ** (number of attempts - 1). E.g., if
+      |sleep| is 1, and |backoff_factor| is 2, the delay will be,
+      1, 2, 4, 8, 16, ... and so on
+  - Note: Keep in mind that, if |backoff_factor| is not given, the total
+    delay time will be the triangular number of |max_retry| multiplied by the
+    |sleep| value. E.g., with |max_retry| of 5 and |sleep| of 10, the total is
+    T5 (i.e. 5 + 4 + 3 + 2 + 1) times 10 = 150 seconds. Rather than using a
+    large sleep value, lean towards more retries with lower sleep intervals,
+    or utilize |backoff_factor|.
+  - In addition, a random 'jitter' duration can be added to each delay.
+    (Often this helps to avoid repeated collisions.) |jitter| specifies the
+    base duration of the jitter, which is randomized up to 50% in either
+    direction.
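+
+  For example, WithRetry(max_retry=4, sleep=1, backoff_factor=2, jitter=0.5)
+  sleeps roughly 1, 2, 4 and 8 seconds between attempts, each plus a random
+  jitter of 0.25 to 0.75 seconds.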
+  """
+
+  def __init__(self,
+               max_retry, handler=None, exception=None, log_all_retries=False,
+               sleep=0, backoff_factor=1, jitter=0,
+               raise_first_exception_on_failure=True, exception_to_raise=None,
+               status_callback=None):
+    """Initialize.
+
+    Args:
+      max_retry: A positive integer representing how many times to retry the
+          command before giving up.  Worst case, the command is invoked
+          (max_retry + 1) times before failing.
+      handler: Please see above for details.
+      exception: Please see above for details.
+      log_all_retries: when True, logs all retries.
+      sleep: Please see above for details.
+      backoff_factor: Please see above for details.
+      jitter: Please see above for details.
+      raise_first_exception_on_failure: determines which exception is raised
+          upon failure after retries. If True, the first exception that was
+          encountered. Otherwise, the final one.
+      exception_to_raise: Optional exception type. If given, raises its
+          instance, instead of the one raised from the retry body.
+      status_callback: Optional callback invoked after each call of |functor|.
+          It takes two arguments: |attempt|, the index of the last attempt
+          (0-based), and |success|, whether the last attempt succeeded. If
+          the callback raises an exception, no
+          further retry will be made, and the exception will be propagated to
+          the caller.
+    """
+    if max_retry < 0:
+      raise ValueError('max_retry needs to be zero or more: %d' % max_retry)
+    self._max_retry = max_retry
+
+    if handler is not None and exception is not None:
+      raise ValueError('handler and exception cannot be specified at once')
+    self._handler = (
+        handler or _CreateExceptionRetryHandler(exception or Exception))
+
+    self._log_all_retries = log_all_retries
+    self._retry_delay = _RetryDelayStrategy(sleep, backoff_factor, jitter)
+    self._raise_first_exception_on_failure = raise_first_exception_on_failure
+    self._exception_to_raise = exception_to_raise
+    self._status_callback = status_callback or (lambda attempt, success: None)
+
+  def __call__(self, func):
+    @functools.wraps(func)
+    def _Wrapper(*args, **kwargs):
+      fname = getattr(func, '__qualname__',
+                      getattr(func, '__name__', '<nameless>'))
+      exc_info = None
+      for attempt in range(self._max_retry + 1):
+        if attempt:
+          self._retry_delay.Sleep(attempt)
+
+        if attempt and self._log_all_retries:
+          logging.debug('Retrying %s (attempt %d)', fname, attempt + 1)
+
+        try:
+          ret = func(*args, **kwargs)
+        except Exception as e:
+          # Note we're not snagging BaseException, so
+          # MemoryError/KeyboardInterrupt and friends don't enter this except
+          # block.
+
+          # If raise_first_exception_on_failure, we intentionally ignore
+          # any failures in later attempts since we'll throw the original
+          # failure if all retries fail.
+          if exc_info is None or not self._raise_first_exception_on_failure:
+            exc_info = sys.exc_info()
+
+          try:
+            self._status_callback(attempt, False)
+          except Exception:
+            # In case callback raises an exception, quit the retry.
+            # For further investigation, log the original exception here.
+            logging.error('Ending retry due to Exception raised by a callback. '
+                          'Original exception raised during the attempt is '
+                          'as follows: ',
+                          exc_info=exc_info)
+            # Reraise the exception raised from the status_callback.
+            raise
+
+          if not self._handler(e):
+            logging.debug('ending retries with error: %s(%s)', e.__class__, e)
+            break
+          logging.debug('%s(%s)', e.__class__, e)
+        else:
+          # Run callback in outside of try's main block, in order to avoid
+          # accidental capture of an Exception which may be raised in callback.
+          self._status_callback(attempt, True)
+          return ret
+
+      # Did not return, meaning all attempts failed. Raise the exception.
+      if self._exception_to_raise:
+        raise self._exception_to_raise('%s: %s' % (exc_info[0], exc_info[1]))
+      six.reraise(exc_info[0], exc_info[1], exc_info[2])
+    return _Wrapper
+
+
+def GenericRetry(handler, max_retry, functor, *args, **kwargs):
+  """Generic retry loop w/ optional break out depending on exceptions.
+
+  Runs functor(*args, **(kwargs excluding params for retry)) as a retry body.
+
+  Please see WithRetry for details about retrying parameters.
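+
+  Examples:
+    A minimal sketch; the handler, fetch function, and url below are
+    hypothetical:
+      def _retry_io(exc):
+        return isinstance(exc, IOError)
+      GenericRetry(_retry_io, 3, _FetchUrl, url, sleep=2)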
+  """
+  # Note: the default values here need to match those of WithRetry's ctor.
+  log_all_retries = kwargs.pop('log_all_retries', False)
+  delay_sec = kwargs.pop('delay_sec', 0)
+  sleep = kwargs.pop('sleep', 0)
+  backoff_factor = kwargs.pop('backoff_factor', 1)
+  status_callback = kwargs.pop('status_callback', None)
+  raise_first_exception_on_failure = kwargs.pop(
+      'raise_first_exception_on_failure', True)
+  exception_to_raise = kwargs.pop('exception_to_raise', None)
+
+  @WithRetry(
+      max_retry=max_retry, handler=handler, log_all_retries=log_all_retries,
+      sleep=sleep, backoff_factor=backoff_factor, jitter=delay_sec,
+      raise_first_exception_on_failure=raise_first_exception_on_failure,
+      exception_to_raise=exception_to_raise,
+      status_callback=status_callback)
+  def _run():
+    return functor(*args, **kwargs)
+  return _run()
+
+
+def RetryException(exception, max_retry, functor, *args, **kwargs):
+  """Convenience wrapper for GenericRetry based on exceptions.
+
+  Runs functor(*args, **(kwargs excluding params for retry)) as a retry body.
+
+  Please see WithRetry for details about retrying parameters.
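+
+  Examples:
+    A minimal sketch; the file-reading function and path are hypothetical:
+      RetryException((IOError, OSError), 3, _ReadFile, path, sleep=1)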
+  """
+  log_all_retries = kwargs.pop('log_all_retries', False)
+  delay_sec = kwargs.pop('delay_sec', 0)
+  sleep = kwargs.pop('sleep', 0)
+  backoff_factor = kwargs.pop('backoff_factor', 1)
+  status_callback = kwargs.pop('status_callback', None)
+  raise_first_exception_on_failure = kwargs.pop(
+      'raise_first_exception_on_failure', True)
+  exception_to_raise = kwargs.pop('exception_to_raise', None)
+
+  @WithRetry(
+      max_retry=max_retry, exception=exception,
+      log_all_retries=log_all_retries,
+      sleep=sleep, backoff_factor=backoff_factor, jitter=delay_sec,
+      raise_first_exception_on_failure=raise_first_exception_on_failure,
+      exception_to_raise=exception_to_raise,
+      status_callback=status_callback)
+  def _run():
+    return functor(*args, **kwargs)
+  return _run()
+
+
+def RetryCommand(functor, max_retry, *args, **kwargs):
+  """Wrapper for run that will retry a command.
+
+  Args:
+    functor: run function to run; retries will only occur on
+      RunCommandError exceptions being thrown.
+    max_retry: A positive integer representing how many times to retry
+      the command before giving up.  Worst case, the command is invoked
+      (max_retry + 1) times before failing.
+    sleep: Optional keyword.  Multiplier for how long to sleep between
+      retries; will delay (1*sleep) the first time, then (2*sleep),
+      continuing via attempt * sleep.
+    retry_on: If provided, we will retry on any exit codes in the given list.
+      Note: A process will exit with a negative exit code if it is killed by a
+      signal. By default, we retry on all non-negative exit codes.
+    error_check: Optional callback to check the error output.  Return None to
+      fall back to |retry_on|, or True/False to set the retry directly.
+    log_retries: Whether to log a warning when retriable errors occur.
+    args: Positional args passed to run; see run for specifics.
+    kwargs: Optional args passed to run; see run for specifics.
+
+  Returns:
+    A CommandResult object.
+
+  Raises:
+    RunCommandError: Raised on error.
+  """
+  values = kwargs.pop('retry_on', None)
+  error_check = kwargs.pop('error_check', lambda x: None)
+  log_retries = kwargs.pop('log_retries', True)
+
+  def ShouldRetry(exc):
+    """Return whether we should retry on a given exception."""
+    if not ShouldRetryCommandCommon(exc):
+      return False
+    if values is None and exc.result.returncode < 0:
+      logging.info('Child process received signal %d; not retrying.',
+                   -exc.result.returncode)
+      return False
+
+    ret = error_check(exc)
+    if ret is not None:
+      return ret
+
+    if values is None or exc.result.returncode in values:
+      if log_retries:
+        logging.warning('Command failed with retriable error.\n%s', exc)
+      return True
+    return False
+
+  return GenericRetry(ShouldRetry, max_retry, functor, *args, **kwargs)
+
+
+def ShouldRetryCommandCommon(exc):
+  """Returns whether any run should retry on a given exception."""
+  if not isinstance(exc, cros_build_lib.RunCommandError):
+    return False
+  if exc.result.returncode is None:
+    logging.error('Child process failed to launch; not retrying:\n'
+                  'command: %s', exc.result.cmdstr)
+    return False
+  return True
+
+
+def RunCommandWithRetries(max_retry, *args, **kwargs):
+  """Wrapper for run that will retry a command
+
+  Args:
+    max_retry: See RetryCommand and run.
+    *args: See RetryCommand and run.
+    **kwargs: See RetryCommand and run.
+
+  Returns:
+    A CommandResult object.
+
+  Raises:
+    RunCommandError: Raised on error.
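+
+  Examples:
+    A minimal sketch (command and retriable exit codes below are illustrative):
+      RunCommandWithRetries(3, ['git', 'fetch'], sleep=2, retry_on=[128])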
+  """
+  return RetryCommand(cros_build_lib.run, max_retry, *args, **kwargs)
+
+
+class DownloadError(Exception):
+  """Fetching file via curl failed"""
+
+
+def RunCurl(curl_args, *args, **kwargs):
+  """Runs curl and wraps around all necessary hacks.
+
+  Args:
+    curl_args: Command line to pass to curl. Must be list of str.
+    *args, **kwargs: See RunCommandWithRetries and run.
+      Note that retry_on, error_check, sleep, backoff_factor cannot be
+      overwritten.
+
+  Returns:
+    A CommandResult object.
+
+  Raises:
+    DownloadError: Whenever curl fails for any reason.
+  """
+  cmd = ['curl'] + curl_args
+
+  # These values were discerned via scraping the curl manpage; they're all
+  # retry related (dns failed, timeout occurred, etc.; see the manpage for
+  # exact specifics of each).
+  # Note we allow 22 to deal w/ 500s; they're thrown by google storage
+  # occasionally.  This is also thrown when getting 4xx, but curl doesn't
+  # make it easy to differentiate between them.
+  # Note we allow 35 to deal w/ Unknown SSL Protocol errors, thrown by
+  # google storage occasionally.
+  # Finally, we do not use curl's --retry option since it generally doesn't
+  # actually retry anything; it will not retry on code 18, for example.
+  retriable_exits = frozenset([5, 6, 7, 15, 18, 22, 26, 28, 35, 52, 56])
+
+  def _CheckExit(exc):
+    """Filter out specific error codes when getting exit 22
+
+    Curl will exit(22) for a wide range of HTTP codes -- both the 4xx and 5xx
+    set.  For the 4xx, we don't want to retry.  We have to look at the output.
+    """
+    assert isinstance(exc, cros_build_lib.RunCommandError)
+    if exc.result.returncode == 22:
+      logging.debug('curl stderr %s', exc.result.error)
+      matched = CURL_STATUS_RE.search(exc.result.error)
+      if not matched:
+        # Unexpected stderr.  It may not be error output from --fail.
+        return True
+      status_code = matched.group(1)
+      return not status_code.startswith(b'4')
+
+    # We'll let the common exit code filter do the right thing.
+    return None
+
+  try:
+    return RunCommandWithRetries(
+        10, cmd, retry_on=retriable_exits, error_check=_CheckExit,
+        sleep=3, backoff_factor=1.6,
+        stderr=True, extra_env={'LC_MESSAGES': 'C'}, *args, **kwargs)
+  except cros_build_lib.RunCommandError as e:
+    if e.result.returncode in (51, 58, 60):
+      # These are the return codes of failing certs as per 'man curl'.
+      raise DownloadError(
+          'Download failed with certificate error? Try "sudo c_rehash".')
+    raise DownloadError('Curl failed w/ exit code %i: %s' %
+                        (e.result.returncode, e.result.error))
diff --git a/utils/frozen_chromite/lib/signals.py b/utils/frozen_chromite/lib/signals.py
new file mode 100644
index 0000000..4622484
--- /dev/null
+++ b/utils/frozen_chromite/lib/signals.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2011-2012 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Signal related functionality."""
+
+from __future__ import print_function
+
+import signal
+import contextlib
+
+
+def RelaySignal(handler, signum, frame):
+  """Notify a listener returned from getsignal of receipt of a signal.
+
+  Returns:
+    True if it was relayed to the target, False otherwise.
+    False in particular occurs if the target isn't relayable.
+  """
+  if handler in (None, signal.SIG_IGN):
+    return True
+  elif handler == signal.SIG_DFL:
+    # This scenario is fairly painful to handle fully, thus we just
+    # state we couldn't handle it and leave it to client code.
+    return False
+  handler(signum, frame)
+  return True
+
+
+def SignalModuleUsable(_signal=signal.signal, _SIGUSR1=signal.SIGUSR1):
+  """Verify that the signal module is usable and won't segfault on us.
+
+  See http://bugs.python.org/issue14173.  This function detects if the
+  signals module is no longer safe to use (which only occurs during
+  final stages of the interpreter shutdown) and heads off a segfault
+  if signal.* was accessed.
+
+  This shouldn't be used by anything other than functionality that is
+  known and unavoidably invoked by finalizer code during python shutdown.
+
+  Finally, the default args here are intentionally binding what we need
+  from the signal module to do the necessary test; invoking code shouldn't
+  pass any options, nor should any developer ever remove those default
+  options.
+
+  Note that this functionality is intended to be removed just as soon
+  as all consuming code installs their own SIGTERM handlers.
+  """
+  # Track any signals we receive while doing the check.
+  received, actual = [], None
+  def handler(signum, frame):
+    received.append([signum, frame])
+  try:
+    # Play with sigusr1, since it's not particularly used.
+    actual = _signal(_SIGUSR1, handler)
+    _signal(_SIGUSR1, actual)
+    return True
+  except (TypeError, AttributeError, SystemError, ValueError):
+    # The first three exceptions can be thrown depending on the state of the
+    # signal module internal Handlers array; we catch all, and interpret it
+    # as if we were invoked during sys.exit cleanup.
+    # The last exception can be thrown if we're trying to be used in a thread
+    # which is not the main one.  This can come up with standard python modules
+    # such as BaseHTTPServer.HTTPServer.
+    return False
+  finally:
+    # And now relay those signals to the original handler.  Not all may
+    # be delivered; the first may throw an exception, for example.  Not our
+    # problem however.
+    for signum, frame in received:
+      actual(signum, frame)
+
+
[email protected]
+def DeferSignals(*args):
+  """Context Manger to defer signals during a critical block.
+
+  If a signal comes in for the masked signals, the original handler
+  is ran after the  critical block has exited.
+
+  Args:
+    args: Which signals to defer.  If none are given, defaults to
+      SIGINT, SIGTERM, and SIGALRM.
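+
+  Examples:
+    A minimal sketch (the critical-section body is hypothetical):
+      with DeferSignals():
+        DoCriticalWork()  # SIGINT/SIGTERM/SIGALRM are deferred until exit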
+  """
+  signals = args
+  if not signals:
+    signals = [signal.SIGINT, signal.SIGTERM, signal.SIGALRM]
+
+  # Rather than directly setting the handler, we first pull the handlers, then
+  # set the new handler.  The ordering has to be done this way to ensure that
+  # if someone passes in a bad signum (or a signal lands prior to starting the
+  # critical block), we can restore things to pristine state.
+  handlers = dict((signum, signal.getsignal(signum)) for signum in signals)
+
+  received = []
+  def handler(signum, frame):
+    received.append((signum, frame))
+
+  try:
+    for signum in signals:
+      signal.signal(signum, handler)
+
+    yield
+
+  finally:
+    for signum, original in handlers.items():
+      signal.signal(signum, original)
+
+    for signum, frame in received:
+      RelaySignal(handlers[signum], signum, frame)
+
+
+def StrSignal(sig_num):
+  """Convert a signal number to the symbolic name
+
+  Note: Some signal number have multiple names, so you might get
+  back a confusing result like "SIGIOT|SIGABRT".  Since they have
+  the same signal number, it's impossible to say which one is right.
+
+  Args:
+    sig_num: The numeric signal you wish to convert
+
+  Returns:
+    A string of the signal name(s)
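+
+  Examples:
+    StrSignal(signal.SIGTERM) returns 'SIGTERM'; realtime signals fall back
+    to generated names such as 'SIGRT_<num>'.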
+  """
+  # Handle realtime signals first since they are unnamed.
+  if sig_num >= signal.SIGRTMIN and sig_num < signal.SIGRTMAX:
+    return 'SIGRT_%i' % sig_num
+
+  # Probe the module looking for matching signal constant.
+  sig_names = []
+  for name, num in signal.__dict__.items():
+    if name.startswith('SIG') and num == sig_num:
+      sig_names.append(name)
+  if sig_names:
+    return '|'.join(sig_names)
+  else:
+    return 'SIG_%i' % sig_num
diff --git a/utils/frozen_chromite/lib/stateful_updater.py b/utils/frozen_chromite/lib/stateful_updater.py
new file mode 100644
index 0000000..5c09a88
--- /dev/null
+++ b/utils/frozen_chromite/lib/stateful_updater.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Module for updating the stateful partition on the device.
+
+Use this module to update the stateful partition given a stateful payload
+(e.g. stateful.tgz) on the device. This module untars/uncompresses the payload
+on the device into var_new and dev_image_new directories. Optionally, you can
+ask this module to reset the stateful partition by preparing it to be
+clobbered on reboot.
+"""
+
+from __future__ import print_function
+
+import os
+import tempfile
+
+from autotest_lib.utils.frozen_chromite.lib import constants
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import osutils
+
+
+class Error(Exception):
+  """Base exception class of StatefulUpdater errors."""
+
+
+class StatefulUpdater(object):
+  """The module for updating the stateful partition."""
+
+  UPDATE_TYPE_STANDARD = 'standard'
+  UPDATE_TYPE_CLOBBER = 'clobber'
+
+  _VAR_DIR = 'var_new'
+  _DEV_IMAGE_DIR = 'dev_image_new'
+  _UPDATE_TYPE_FILE = '.update_available'
+
+  def __init__(self, device, stateful_dir=constants.STATEFUL_DIR):
+    """Initializes the module.
+
+    Args:
+      device: The ChromiumOSDevice to be updated.
+      stateful_dir: The stateful directory on the Chromium OS device.
+    """
+    self._device = device
+    self._stateful_dir = stateful_dir
+    self._var_dir = os.path.join(self._stateful_dir, self._VAR_DIR)
+    self._dev_image_dir = os.path.join(self._stateful_dir, self._DEV_IMAGE_DIR)
+    self._update_type_file = os.path.join(self._stateful_dir,
+                                          self._UPDATE_TYPE_FILE)
+
+  def Update(self, payload_path_on_device, update_type=None):
+    """Updates the stateful partition given the update file.
+
+    Args:
+      payload_path_on_device: The path to the stateful update (stateful.tgz)
+        on the DUT.
+      update_type: The type of the stateful update to be marked. Accepted
+        values: 'standard' (default) and 'clobber'.
+    """
+    if not self._device.IfPathExists(payload_path_on_device):
+      raise Error('Missing the file: %s' % payload_path_on_device)
+
+    try:
+      cmd = ['tar', '--ignore-command-error', '--overwrite',
+             '--directory', self._stateful_dir, '-xzf', payload_path_on_device]
+      self._device.run(cmd)
+    except cros_build_lib.RunCommandError as e:
+      raise Error('Failed to untar the stateful update with error %s' % e)
+
+    # Make sure target directories are generated on the device.
+    if (not self._device.IfPathExists(self._var_dir) or
+        not self._device.IfPathExists(self._dev_image_dir)):
+      raise Error('Missing var or dev_image in stateful payload.')
+
+    self._MarkUpdateType(update_type if update_type is not None
+                         else self.UPDATE_TYPE_STANDARD)
+
+  def _MarkUpdateType(self, update_type):
+    """Marks the type of the update.
+
+    Args:
+      update_type: The type of the update to be marked. See Update()
+    """
+    if update_type not in (self.UPDATE_TYPE_CLOBBER, self.UPDATE_TYPE_STANDARD):
+      raise Error('Invalid update type %s' % update_type)
+
+    with tempfile.NamedTemporaryFile() as f:
+      if update_type == self.UPDATE_TYPE_STANDARD:
+        logging.notice('Performing standard stateful update...')
+      elif update_type == self.UPDATE_TYPE_CLOBBER:
+        logging.notice('Restoring stateful to factory_install '
+                       'with dev_image...')
+        osutils.WriteFile(f.name, 'clobber')
+
+      try:
+        self._device.CopyToDevice(f.name, self._update_type_file, 'scp')
+      except cros_build_lib.RunCommandError as e:
+        raise Error('Failed to copy update type file to device with error %s' %
+                    e)
+
+  def Reset(self):
+    """Resets the stateful partition."""
+    logging.info('Resetting stateful update state.')
+
+    try:
+      self._device.run(['rm', '-rf', self._update_type_file,
+                        self._var_dir, self._dev_image_dir])
+    except cros_build_lib.RunCommandError as e:
+      logging.warning('(ignoring) Failed to delete stateful update paths with'
+                      ' error: %s', e)
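+
+
+# Illustrative usage sketch (the device object and payload path below are
+# hypothetical):
+#
+#   updater = StatefulUpdater(device)
+#   updater.Update('/usr/local/stateful.tgz',
+#                  update_type=StatefulUpdater.UPDATE_TYPE_STANDARD)
+#   # ...or stage a clobbering update / undo a staged update:
+#   # updater.Update(payload, update_type=StatefulUpdater.UPDATE_TYPE_CLOBBER)
+#   # updater.Reset()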
diff --git a/utils/frozen_chromite/lib/terminal.py b/utils/frozen_chromite/lib/terminal.py
new file mode 100644
index 0000000..38a0195
--- /dev/null
+++ b/utils/frozen_chromite/lib/terminal.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Terminal utilities
+
+This module handles terminal interaction including ANSI color codes.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+
+from autotest_lib.utils.frozen_chromite.lib import cros_build_lib
+
+
+class Color(object):
+  """Conditionally wraps text in ANSI color escape sequences."""
+  BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
+  BOLD = -1
+  COLOR_START = '\033[1;%dm'
+  BOLD_START = '\033[1m'
+  RESET = '\033[0m'
+
+  def __init__(self, enabled=None):
+    """Create a new Color object, optionally disabling color output.
+
+    Args:
+      enabled: True if color output should be enabled. If False then this
+        class will not add color codes at all.
+    """
+    self._enabled = enabled
+    if self._enabled is None:
+      self._enabled = self.UserEnabled()
+      if self._enabled is None:
+        self._enabled = sys.stdout.isatty()
+
+  def Start(self, color):
+    """Returns a start color code.
+
+    Args:
+      color: Color to use, e.g. BLACK, RED, etc.
+
+    Returns:
+      If color is enabled, returns an ANSI sequence to start the given color,
+      otherwise returns an empty string.
+    """
+    if self._enabled:
+      return self.COLOR_START % (color + 30)
+    return ''
+
+  def Stop(self):
+    """Returns a stop color code.
+
+    Returns:
+      If color is enabled, returns an ANSI color reset sequence, otherwise
+      returns an empty string.
+    """
+    if self._enabled:
+      return self.RESET
+    return ''
+
+  def Color(self, color, text):
+    """Returns text with conditionally added color escape sequences.
+
+    Args:
+      color: Text color -- one of the color constants defined in this class.
+      text: The text to color.
+
+    Returns:
+      If self._enabled is False, returns the original text. If it's True,
+      returns text with color escape sequences based on the value of color.
+    """
+    if not self._enabled:
+      return text
+    if color == self.BOLD:
+      start = self.BOLD_START
+    else:
+      start = self.COLOR_START % (color + 30)
+    return start + text + self.RESET
+
+  @staticmethod
+  def UserEnabled():
+    """See if the global colorization preference is enabled ($NOCOLOR env)"""
+    is_disabled = cros_build_lib.BooleanShellValue(
+        os.environ.get('NOCOLOR'), msg='$NOCOLOR env var is invalid',
+        default=None)
+    return not is_disabled if is_disabled is not None else None
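+
+
+# Illustrative usage sketch (output destination is up to the caller):
+#
+#   color = Color()                       # auto-detects $NOCOLOR / tty support
+#   print(color.Color(Color.GREEN, 'PASS'))
+#   print(color.Start(Color.RED) + 'FAIL' + color.Stop())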
diff --git a/utils/frozen_chromite/lib/timeout_util.py b/utils/frozen_chromite/lib/timeout_util.py
new file mode 100644
index 0000000..4191597
--- /dev/null
+++ b/utils/frozen_chromite/lib/timeout_util.py
@@ -0,0 +1,317 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions for implementing timeouts."""
+
+from __future__ import print_function
+
+import contextlib
+import datetime
+import functools
+import signal
+import threading
+import time
+
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+
+
+class TimeoutError(Exception):  # pylint: disable=redefined-builtin
+  """Raises when code within Timeout has been run too long."""
+
+
+def Timedelta(num, zero_ok=False):
+  """Normalize |num| (in seconds) into a datetime.timedelta."""
+  if not isinstance(num, datetime.timedelta):
+    num = datetime.timedelta(seconds=num)
+  if zero_ok:
+    if num.total_seconds() < 0:
+      raise ValueError('timing must be >= 0, not %s' % (num,))
+  else:
+    if num.total_seconds() <= 0:
+      raise ValueError('timing must be greater than 0, not %s' % (num,))
+  return num
+
+
+def _ScheduleTimer(seconds, interval=0):
+  """Schedules the timer to raise SIGALRM.
+
+  If |seconds| is less than the minimum resolution, it is rounded up to the
+  resolution.
+  Note: if |seconds| is very short, the signal can be delivered almost
+  immediately, so the handler may be called while still in this stack.
+
+  Args:
+    seconds: How long to wait before sending SIGALRM, in seconds.
+    interval: (Optional) interval schedule for the timer.
+  """
+  # Min resolution of itimer. See man setitimer(2) for details.
+  MIN_SECONDS = 0.000001
+  signal.setitimer(signal.ITIMER_REAL, max(seconds, MIN_SECONDS), interval)
+
+
+def _CancelTimer():
+  """Cancels the currently scheduled SIGALRM timer.
+
+  Returns:
+    Previous timer, which is a pair of scheduled timeout and interval.
+  """
+  return signal.setitimer(signal.ITIMER_REAL, 0)
+
+
[email protected]
+def Timeout(max_run_time,
+            error_message='Timeout occurred- waited %(time)s seconds.',
+            reason_message=None):
+  """ContextManager that alarms if code is ran for too long.
+
+  Timeout can run nested and raises a TimeoutException if the timeout
+  is reached. Timeout can also nest underneath FatalTimeout.
+
+  Args:
+    max_run_time: How long to wait before sending SIGALRM.  May be a number
+      (in seconds, can be fractional) or a datetime.timedelta object.
+    error_message: Optional string to wrap in the TimeoutError exception on
+      timeout. If not provided, default template will be used.
+    reason_message: Optional string to be appended to the TimeoutError
+      error_message string. Provide a custom message here if you want to have
+      a purpose-specific message without overriding the default template in
+      |error_message|.
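+
+  Examples:
+    A minimal sketch (the waiting function is hypothetical):
+      with Timeout(30, reason_message=' while waiting for the device.'):
+        WaitForDevice()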
+  """
+  max_run_time = Timedelta(max_run_time).total_seconds()
+  if reason_message:
+    error_message += reason_message
+
+  # pylint: disable=unused-argument
+  def kill_us(sig_num, frame):
+    raise TimeoutError(error_message % {'time': max_run_time})
+
+  previous_time = time.time()
+  previous_timeout, previous_interval = _CancelTimer()
+  original_handler = signal.signal(signal.SIGALRM, kill_us)
+
+  try:
+    # Signal the min in case the leftover time was smaller than this timeout.
+    # This needs to be called in try block, otherwise, finally may not be
+    # called in case that the timeout duration is too short.
+    _ScheduleTimer(min(previous_timeout or float('inf'), max_run_time))
+    yield
+  finally:
+    # Cancel the alarm request and restore the original handler.
+    _CancelTimer()
+    signal.signal(signal.SIGALRM, original_handler)
+
+    # Ensure the previous handler will fire if it was meant to.
+    if previous_timeout:
+      remaining_timeout = previous_timeout - (time.time() - previous_time)
+      # It is OK to pass a negative remaining_timeout; see the comments in
+      # _ScheduleTimer for details.
+      _ScheduleTimer(remaining_timeout, previous_interval)
+
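+# Example sketch (hypothetical call site; _probe_dut and host are
+# placeholders): Timeout is a context manager, so a typical use looks like:
+#
+#   try:
+#     with Timeout(30, reason_message=' while probing the DUT'):
+#       _probe_dut(host)
+#   except TimeoutError:
+#     logging.warning('DUT probe did not finish within 30 seconds.')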
+
[email protected]
+def FatalTimeout(max_run_time, display_message=None):
+  """ContextManager that exits the program if code is run for too long.
+
+  This implementation is fairly simple, thus multiple timeouts
+  cannot be active at the same time.
+
+  Additionally, when the timeout elapses, a SystemExit exception is raised
+  within the invoking code and ultimately propagated past this manager.  If
+  the underlying code tries to suppress the SystemExit, it is re-raised once
+  a minute until control returns to this manager.
+
+  Args:
+    max_run_time: How long to wait.  May be a number (in seconds, can be
+      fractional) or a datetime.timedelta object.
+    display_message: Optional string message to be included in timeout
+      error message, if the timeout occurs.
+  """
+  max_run_time = Timedelta(max_run_time).total_seconds()
+
+  # pylint: disable=unused-argument
+  def kill_us(sig_num, frame):
+    # While this SystemExit *should* crash its way back up the
+    # stack to our exit handler, we do have live/production code
+    # that uses blanket except statements which could suppress this.
+    # As such, keep scheduling alarms until our exit handler runs.
+    # Note that this can conflict with run's kill_timeout, so we
+    # keep the alarm interval fairly long.
+    _ScheduleTimer(60)
+
+    # The cbuildbot stage that gets aborted by this timeout should be treated as
+    # failed by buildbot.
+    error_message = ('Timeout occurred- waited %i seconds, failing.' %
+                     max_run_time)
+    if display_message:
+      error_message += ' Timeout reason: %s' % display_message
+    logging.PrintBuildbotStepFailure()
+    logging.error(error_message)
+    raise SystemExit(error_message)
+
+  if signal.getitimer(signal.ITIMER_REAL)[0]:
+    raise Exception('FatalTimeout cannot be used in parallel to other alarm '
+                    'handling code; failing')
+
+  original_handler = signal.signal(signal.SIGALRM, kill_us)
+  try:
+    _ScheduleTimer(max_run_time)
+    yield
+  finally:
+    # Cancel the alarm request and restore the original handler.
+    _CancelTimer()
+    signal.signal(signal.SIGALRM, original_handler)
+
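+# Example sketch (hypothetical call site; RunWholeBuild is a placeholder):
+# FatalTimeout raises SystemExit rather than TimeoutError, so it is meant to
+# bound an entire top-level operation.
+#
+#   with FatalTimeout(datetime.timedelta(hours=2), display_message='build'):
+#     RunWholeBuild()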
+
+def TimeoutDecorator(max_time):
+  """Decorator used to ensure a func is interrupted if it's running too long."""
+  # Save off the built-in versions of time.time, signal.signal,
+  # signal.setitimer, and signal.getitimer, in case they get mocked out later.
+  # We want to ensure that tests don't accidentally mock out the functions
+  # used by Timeout.
+  def _Save():
+    return (time.time, signal.signal, signal.setitimer, signal.getitimer,
+            signal.SIGALRM, signal.ITIMER_REAL)
+  def _Restore(values):
+    (time.time, signal.signal, signal.setitimer, signal.getitimer,
+     signal.SIGALRM, signal.ITIMER_REAL) = values
+  builtins = _Save()
+
+  def NestedTimeoutDecorator(func):
+    @functools.wraps(func)
+    def TimeoutWrapper(*args, **kwargs):
+      new = _Save()
+      try:
+        _Restore(builtins)
+        with Timeout(max_time):
+          _Restore(new)
+          try:
+            return func(*args, **kwargs)
+          finally:
+            _Restore(builtins)
+      finally:
+        _Restore(new)
+
+    return TimeoutWrapper
+
+  return NestedTimeoutDecorator
+
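+# Example sketch (hypothetical function name): TimeoutDecorator runs every
+# call to the wrapped function under Timeout(max_time).
+#
+#   @TimeoutDecorator(10)
+#   def FetchStatus():
+#     ...  # raises TimeoutError if a call takes longer than 10 seconds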
+
+def WaitForReturnTrue(*args, **kwargs):
+  """Periodically run a function, waiting in between runs.
+
+  Continues to run until the function returns True.
+
+  Args:
+    See WaitForReturnValue([True], ...)
+
+  Raises:
+    TimeoutError when the timeout is exceeded.
+  """
+  WaitForReturnValue([True], *args, **kwargs)
+
+
+def WaitForReturnValue(values, *args, **kwargs):
+  """Periodically run a function, waiting in between runs.
+
+  Continues to run until the function return value is in the list
+  of accepted |values|.  See WaitForSuccess for more details.
+
+  Args:
+    values: A list or set of acceptable return values.
+    *args, **kwargs: See WaitForSuccess for remaining arguments.
+
+  Returns:
+    The value most recently returned by |func|.
+
+  Raises:
+    TimeoutError when the timeout is exceeded.
+  """
+  def _Retry(return_value):
+    return return_value not in values
+
+  return WaitForSuccess(_Retry, *args, **kwargs)
+
+
+def WaitForSuccess(retry_check, func, timeout, period=1, side_effect_func=None,
+                   func_args=None, func_kwargs=None, fallback_timeout=10):
+  """Periodically run a function, waiting in between runs.
+
+  Continues to run the given function until its return value is accepted by
+  |retry_check|.
+
+  To retry based on raised exceptions see GenericRetry in retry_util.
+
+  Args:
+    retry_check: A functor that will be passed the return value of |func| as
+      the only argument.  If |func| should be retried, |retry_check| should
+      return True.
+    func: The function to run to test for a value.
+    timeout: The maximum amount of time to wait.  May be a number (in seconds)
+      or a datetime.timedelta object.
+    period: How long between calls to |func|.  May be a number (in seconds) or
+      a datetime.timedelta object.
+    side_effect_func: Optional function to be called between polls of func,
+      typically to output logging messages. The remaining time will be passed
+      as a datetime.timedelta object.
+    func_args: Optional list of positional arguments to be passed to |func|.
+    func_kwargs: Optional dictionary of keyword arguments to be passed to
+                 |func|.
+    fallback_timeout: We set a secondary timeout based on sigalarm this many
+                      seconds after the initial timeout. This should NOT be
+                      considered robust, but can allow timeouts inside blocking
+                      methods.
+
+  Returns:
+    The value most recently returned by |func| that was not flagged for retry.
+
+  Raises:
+    TimeoutError when the timeout is exceeded.
+  """
+  timeout = Timedelta(timeout, zero_ok=True)
+  period = Timedelta(period, zero_ok=True)
+  fallback_timeout = Timedelta(fallback_timeout)
+  func_args = func_args or []
+  func_kwargs = func_kwargs or {}
+
+  end = datetime.datetime.now() + timeout
+
+  # pylint: disable=protected-access
+  # This is used to detect whether we are running in the main thread
+  # ('_MainThread'). Before Python 3.4 there is no perfect way to do this.
+  # See this discussion for details:
+  # http://stackoverflow.com/questions/23206787.
+  is_main_thread = isinstance(threading.current_thread(),
+                              threading._MainThread)
+  # pylint: enable=protected-access
+  def retry():
+    while True:
+      # Guarantee we always run at least once.
+      value = func(*func_args, **func_kwargs)
+      if not retry_check(value):
+        return value
+
+      # Run the user's callback func if available.
+      if side_effect_func:
+        delta = end - datetime.datetime.now()
+        if delta.total_seconds() < 0:
+          delta = datetime.timedelta(seconds=0)
+        side_effect_func(delta)
+
+      # If we're just going to sleep past the timeout period, abort now.
+      delta = end - datetime.datetime.now()
+      if delta <= period:
+        raise TimeoutError('Timed out after %s' % timeout)
+
+      time.sleep(period.total_seconds())
+
+  if not is_main_thread:
+    # Warning: we are not running in the main thread. Since signals only
+    # work in the main thread, this call may run longer than the timeout or
+    # even hang.
+    return retry()
+  else:
+    # Use a sigalarm after an extra delay, in case a function we call is
+    # blocking for some reason. This should NOT be considered reliable.
+    with Timeout(timeout + fallback_timeout):
+      return retry()
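+
+
+# Example sketch (hypothetical predicate; device_is_up and hostname are
+# placeholders): poll every 5 seconds for at most 2 minutes.
+#
+#   WaitForReturnTrue(device_is_up, timeout=120, period=5,
+#                     func_args=[hostname])
+#
+# On timeout this raises TimeoutError; otherwise it returns once
+# device_is_up(hostname) has returned True.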
diff --git a/utils/frozen_chromite/lib/ts_mon_config.py b/utils/frozen_chromite/lib/ts_mon_config.py
new file mode 100644
index 0000000..c9cc9a4
--- /dev/null
+++ b/utils/frozen_chromite/lib/ts_mon_config.py
@@ -0,0 +1,397 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Wrapper for inframon's command-line flag based configuration."""
+
+from __future__ import print_function
+
+import argparse
+import contextlib
+import multiprocessing
+import os
+import socket
+import signal
+import time
+
+from six.moves import queue as Queue
+
+import six
+
+from autotest_lib.utils.frozen_chromite.lib import cros_logging as logging
+from autotest_lib.utils.frozen_chromite.lib import metrics
+from autotest_lib.utils.frozen_chromite.lib import parallel
+
+try:
+  from infra_libs.ts_mon import config
+  from infra_libs.ts_mon import BooleanField
+  from infra_libs.ts_mon import IntegerField
+  from infra_libs.ts_mon import StringField
+  import googleapiclient.discovery
+except (ImportError, RuntimeError) as e:
+  config = None
+  logging.warning('Failed to import ts_mon, monitoring is disabled: %s', e)
+
+
+_WasSetup = False
+_CommonMetricFields = {}
+
+FLUSH_INTERVAL = 60
+
+
[email protected]
+def TrivialContextManager():
+  """Context manager with no side effects."""
+  yield
+
+
+def GetMetricFieldSpec(fields=None):
+  """Return the corresponding field_spec for metric fields.
+
+  Args:
+    fields: Dictionary containing metric fields.
+
+  Returns:
+    field_spec: List containing any *Field object associated with metric.
+  """
+  field_spec = []
+  if fields:
+    for key, val in fields.items():
+      if isinstance(val, bool):
+        field_spec.append(BooleanField(key))
+      elif isinstance(val, int):
+        field_spec.append(IntegerField(key))
+      elif isinstance(val, six.string_types):
+        field_spec.append(StringField(key))
+      else:
+        logging.error("Couldn't classify the metric field %s:%s",
+                      key, val)
+
+  return field_spec
+
+def AddCommonFields(fields=None, field_spec=None):
+  """Add cbuildbot-wide common fields to a given field set.
+
+  Args:
+    fields: Dictionary containing metric fields to which common metric fields
+            will be added.
+    field_spec: List containing any *Field object associated with metric.
+
+  Returns:
+    Dictionary containing complete set of metric fields to be applied to
+    metric and a list of corresponding field_spec.
+  """
+  metric_fields = (dict(_CommonMetricFields) if _CommonMetricFields
+                   else {})
+
+  if metric_fields:
+    metric_fields.update(fields or {})
+    return metric_fields, GetMetricFieldSpec(metric_fields)
+  else:
+    return fields, field_spec
+
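+# Example sketch (hypothetical values): with _CommonMetricFields set to
+# {'branch': 'main'}, AddCommonFields({'success': True}, []) would return
+# something like:
+#   ({'branch': 'main', 'success': True},
+#    [StringField('branch'), BooleanField('success')])
+# The exact field_spec order follows dict iteration order.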
+
+def SetupTsMonGlobalState(service_name,
+                          indirect=False,
+                          suppress_exception=True,
+                          short_lived=False,
+                          auto_flush=True,
+                          common_metric_fields=None,
+                          debug_file=None,
+                          task_num=0):
+  """Uses a dummy argument parser to get the default behavior from ts-mon.
+
+  Args:
+    service_name: The name of the task we are sending metrics from.
+    indirect: Whether to create a metrics.MESSAGE_QUEUE object and a separate
+              process for indirect metrics flushing. Useful for forking,
+              because forking would normally create a duplicate ts_mon thread.
+    suppress_exception: True to silence any exception during the setup. Default
+              is set to True.
+    short_lived: Whether this process is short-lived and should use the autogen
+              hostname prefix.
+    auto_flush: Whether to create a thread to automatically flush metrics every
+              minute.
+    common_metric_fields: Dictionary containing the metric fields that will be
+              added to all metrics.
+    debug_file: If not None, send metrics to this path instead of to PubSub.
+    task_num: (Default 0) The task_num target field of the metrics to emit.
+  """
+  if not config:
+    return TrivialContextManager()
+
+  # The flushing subprocess calls .flush manually.
+  if indirect:
+    auto_flush = False
+
+  if common_metric_fields:
+    _CommonMetricFields.update(common_metric_fields)
+
+  # google-api-client has too much noisy logging.
+  options = _GenerateTsMonArgparseOptions(
+      service_name, short_lived, auto_flush, debug_file, task_num)
+
+  if indirect:
+    return _CreateTsMonFlushingProcess(options)
+  else:
+    _SetupTsMonFromOptions(options, suppress_exception)
+    return TrivialContextManager()
+
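+# Example sketch (hypothetical service and metric names): a long-lived
+# service would typically set up ts-mon once at startup.
+#
+#   SetupTsMonGlobalState('my_daemon')
+#   metrics.Counter('chromeos/my_daemon/ticks').increment()
+#
+# With indirect=True the return value is a context manager that owns the
+# separate flushing process and must be entered by the caller.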
+
+def _SetupTsMonFromOptions(options, suppress_exception):
+  """Sets up ts-mon global state given parsed argparse options.
+
+  Args:
+    options: An argparse options object containing ts-mon flags.
+    suppress_exception: True to silence any exception during the setup. Default
+                        is set to True.
+  """
+  googleapiclient.discovery.logger.setLevel(logging.WARNING)
+  try:
+    config.process_argparse_options(options)
+    logging.notice('ts_mon was set up.')
+    global _WasSetup  # pylint: disable=global-statement
+    _WasSetup = True
+  except Exception as e:
+    logging.warning('Failed to configure ts_mon, monitoring is disabled: %s', e,
+                    exc_info=True)
+    if not suppress_exception:
+      raise
+
+
+def _GenerateTsMonArgparseOptions(service_name, short_lived,
+                                  auto_flush, debug_file, task_num):
+  """Generates an arg list for ts-mon to consume.
+
+  Args:
+    service_name: The name of the task we are sending metrics from.
+    short_lived: Whether this process is short-lived and should use the autogen
+                 hostname prefix.
+    auto_flush: Whether to create a thread to automatically flush metrics every
+                minute.
+    debug_file: If not None, send metrics to this path instead of to PubSub.
+    task_num: Override the default task num of 0.
+  """
+  parser = argparse.ArgumentParser()
+  config.add_argparse_options(parser)
+
+  args = [
+      '--ts-mon-target-type', 'task',
+      '--ts-mon-task-service-name', service_name,
+      '--ts-mon-task-job-name', service_name,
+  ]
+
+  if debug_file:
+    args.extend(['--ts-mon-endpoint', 'file://' + debug_file])
+
+  # Short lived processes will have autogen: prepended to their hostname and
+  # use task-number=PID to trigger shorter retention policies under
+  # chrome-infra@; the task number is also used by a Monarch precomputation
+  # to group across task numbers.
+  # Furthermore, we assume they manually call ts_mon.Flush(), because the
+  # ts_mon thread will drop messages if the process exits before it flushes.
+  if short_lived:
+    auto_flush = False
+    fqdn = socket.getfqdn().lower()
+    host = fqdn.split('.')[0]
+    args.extend(['--ts-mon-task-hostname', 'autogen:' + host,
+                 '--ts-mon-task-number', str(os.getpid())])
+  elif task_num:
+    args.extend(['--ts-mon-task-number', str(task_num)])
+
+  args.extend(['--ts-mon-flush', 'auto' if auto_flush else 'manual'])
+  return parser.parse_args(args=args)
+
+
[email protected]
+def _CreateTsMonFlushingProcess(options):
+  """Creates a separate process to flush ts_mon metrics.
+
+  Useful for multiprocessing scenarios where we don't want multiple ts-mon
+  threads sending contradictory metrics. Instead, functions in
+  chromite.lib.metrics will send their calls to a Queue, which is consumed by a
+  dedicated flushing process.
+
+  Args:
+    options: An argparse options object to configure ts-mon with.
+
+  Side effects:
+    Sets chromite.lib.metrics.MESSAGE_QUEUE, which causes the metric functions
+    to send their calls to the Queue instead of creating the metrics.
+  """
+  # If this is nested, we don't need to create another queue and another
+  # message consumer. Do nothing to continue to use the existing queue.
+  if metrics.MESSAGE_QUEUE or metrics.FLUSHING_PROCESS:
+    return
+
+  with parallel.Manager() as manager:
+    message_q = manager.Queue()
+
+    metrics.FLUSHING_PROCESS = multiprocessing.Process(
+        target=lambda: _SetupAndConsumeMessages(message_q, options))
+    metrics.FLUSHING_PROCESS.start()
+
+    # This makes the chromite.lib.metrics functions use the queue.
+    # Note: we have to do this *after* forking the flushing process.
+    metrics.MESSAGE_QUEUE = message_q
+
+    try:
+      yield message_q
+    finally:
+      _CleanupMetricsFlushingProcess()
+
+
+def _CleanupMetricsFlushingProcess():
+  """Sends sentinal value to flushing process and .joins it."""
+  # Now that there is no longer a process to listen to the Queue, re-set it
+  # to None so that any future metrics are created within this process.
+  message_q = metrics.MESSAGE_QUEUE
+  flushing_process = metrics.FLUSHING_PROCESS
+  metrics.MESSAGE_QUEUE = None
+  metrics.FLUSHING_PROCESS = None
+
+  # If the process has already died, we don't need to try to clean it up.
+  if not flushing_process.is_alive():
+    return
+
+  # Send the sentinel value for "flush one more time and exit".
+  try:
+    message_q.put(None)
+  # If the flushing process quits, the message Queue can become full.
+  except IOError:
+    if not flushing_process.is_alive():
+      return
+
+  logging.info('Waiting for ts_mon flushing process to finish...')
+  flushing_process.join(timeout=FLUSH_INTERVAL*2)
+  if flushing_process.is_alive():
+    flushing_process.terminate()
+  if flushing_process.exitcode:
+    logging.warning('ts_mon_config flushing process did not exit cleanly.')
+  logging.info('Finished waiting for ts_mon process.')
+
+
+def _SetupAndConsumeMessages(message_q, options):
+  """Sets up ts-mon, and starts a MetricConsumer loop.
+
+  Args:
+    message_q: The metric multiprocessing.Queue to read from.
+    options: An argparse options object to configure ts-mon with.
+  """
+  # Configure ts-mon, but don't start up a sending thread.
+  _SetupTsMonFromOptions(options, suppress_exception=True)
+  if not _WasSetup:
+    return
+
+  return MetricConsumer(message_q).Consume()
+
+
+class MetricConsumer(object):
+  """Configures ts_mon and gets metrics from a message queue.
+
+  This class is meant to be used in a subprocess. It configures itself
+  to receive a SIGHUP signal when the parent process dies, and catches the
+  signal in order to have a chance to flush any pending metrics one more time
+  before quitting.
+  """
+  def __init__(self, message_q):
+    # If our parent dies, finish flushing before exiting.
+    self.reset_after_flush = []
+    self.last_flush = 0
+    self.pending = False
+    self.message_q = message_q
+
+    if parallel.ExitWithParent(signal.SIGHUP):
+      signal.signal(signal.SIGHUP, lambda _sig, _stack: self._WaitToFlush())
+
+
+  def Consume(self):
+    """Emits metrics from self.message_q, flushing periodically.
+
+    The loop is terminated by a None entry on the Queue, which is a friendly
+    signal from the parent process that it's time to shut down. Before
+    returning, we wait to flush one more time to make sure that all the
+    metrics were sent.
+    """
+    message = self.message_q.get()
+    while message:
+      self._CallMetric(message)
+      message = self._WaitForNextMessage()
+
+    if self.pending:
+      self._WaitToFlush()
+
+
+  def _CallMetric(self, message):
+    """Calls the metric method from |message|, ignoring exceptions."""
+    try:
+      cls = getattr(metrics, message.metric_name)
+      message.method_kwargs.setdefault('fields', {})
+      message.metric_kwargs.setdefault('field_spec', [])
+      message.method_kwargs['fields'], message.metric_kwargs['field_spec'] = (
+          AddCommonFields(message.method_kwargs['fields'],
+                          message.metric_kwargs['field_spec']))
+      metric = cls(*message.metric_args, **message.metric_kwargs)
+      if message.reset_after:
+        self.reset_after_flush.append(metric)
+      getattr(metric, message.method)(
+          *message.method_args,
+          **message.method_kwargs)
+      self.pending = True
+    except Exception:
+      logging.exception('Caught an exception while running %s',
+                        _MethodCallRepr(message))
+
+
+  def _WaitForNextMessage(self):
+    """Waits for a new message, flushing every |FLUSH_INTERVAL| seconds."""
+    while True:
+      time_delta = self._FlushIfReady()
+      try:
+        timeout = FLUSH_INTERVAL - time_delta
+        message = self.message_q.get(timeout=timeout)
+        return message
+      except Queue.Empty:
+        pass
+
+
+  def _WaitToFlush(self):
+    """Sleeps until the next time we can call metrics.Flush(), then flushes."""
+    time_delta = time.time() - self.last_flush
+    time.sleep(max(0, FLUSH_INTERVAL - time_delta))
+    metrics.Flush(reset_after=self.reset_after_flush)
+
+
+  def _FlushIfReady(self):
+    """Call metrics.Flush() if we are ready and have pending metrics.
+
+    This allows us to only call flush every FLUSH_INTERVAL seconds.
+    """
+    now = time.time()
+    time_delta = now - self.last_flush
+    if time_delta > FLUSH_INTERVAL:
+      self.last_flush = now
+      time_delta = 0
+      metrics.Flush(reset_after=self.reset_after_flush)
+      self.pending = False
+    return time_delta
+
+
+def _MethodCallRepr(message):
+  """Gives a string representation of |obj|.|method|(*|args|, **|kwargs|)
+
+  Args:
+    message: A MetricCall object.
+  """
+  if not message:
+    return repr(message)
+  obj = message.metric_name
+  method = message.method
+  args = message.method_args
+  kwargs = message.method_kwargs
+
+  args_strings = ([repr(x) for x in args] +
+                  [(str(k) + '=' + repr(v))
+                   for k, v in kwargs.items()])
+  return '%s.%s(%s)' % (repr(obj), method, ', '.join(args_strings))
diff --git a/utils/frozen_chromite/scripts/__init__.py b/utils/frozen_chromite/scripts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/utils/frozen_chromite/scripts/__init__.py
diff --git a/utils/frozen_chromite/scripts/cros_set_lsb_release.py b/utils/frozen_chromite/scripts/cros_set_lsb_release.py
new file mode 100644
index 0000000..d9aff86
--- /dev/null
+++ b/utils/frozen_chromite/scripts/cros_set_lsb_release.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility for setting the /etc/lsb-release file of an image."""
+
+from __future__ import print_function
+
+
+# LSB keys:
+# Set google-specific version numbers:
+# CHROMEOS_RELEASE_BOARD is the target board identifier.
+# CHROMEOS_RELEASE_BRANCH_NUMBER is the Chrome OS branch number
+# CHROMEOS_RELEASE_BUILD_NUMBER is the Chrome OS build number
+# CHROMEOS_RELEASE_BUILD_TYPE is the type of build (official, from developers,
+# etc..)
+# CHROMEOS_RELEASE_CHROME_MILESTONE is the Chrome milestone (also named Chrome
+#   branch).
+# CHROMEOS_RELEASE_DESCRIPTION is the version displayed by Chrome; see
+#   chrome/browser/chromeos/chromeos_version_loader.cc.
+# CHROMEOS_RELEASE_NAME is a human readable name for the build.
+# CHROMEOS_RELEASE_PATCH_NUMBER is the patch number for the current branch.
+# CHROMEOS_RELEASE_TRACK and CHROMEOS_RELEASE_VERSION are used by the software
+#   update service.
+# CHROMEOS_RELEASE_KEYSET is the name of the keyset used to sign this build.
+# TODO(skrul):  Remove GOOGLE_RELEASE once Chromium is updated to look at
+#   CHROMEOS_RELEASE_VERSION for UserAgent data.
+LSB_KEY_NAME = 'CHROMEOS_RELEASE_NAME'
+LSB_KEY_AUSERVER = 'CHROMEOS_AUSERVER'
+LSB_KEY_DEVSERVER = 'CHROMEOS_DEVSERVER'
+LSB_KEY_TRACK = 'CHROMEOS_RELEASE_TRACK'
+LSB_KEY_BUILD_TYPE = 'CHROMEOS_RELEASE_BUILD_TYPE'
+LSB_KEY_DESCRIPTION = 'CHROMEOS_RELEASE_DESCRIPTION'
+LSB_KEY_BOARD = 'CHROMEOS_RELEASE_BOARD'
+LSB_KEY_KEYSET = 'CHROMEOS_RELEASE_KEYSET'
+LSB_KEY_UNIBUILD = 'CHROMEOS_RELEASE_UNIBUILD'
+LSB_KEY_BRANCH_NUMBER = 'CHROMEOS_RELEASE_BRANCH_NUMBER'
+LSB_KEY_BUILD_NUMBER = 'CHROMEOS_RELEASE_BUILD_NUMBER'
+LSB_KEY_CHROME_MILESTONE = 'CHROMEOS_RELEASE_CHROME_MILESTONE'
+LSB_KEY_PATCH_NUMBER = 'CHROMEOS_RELEASE_PATCH_NUMBER'
+LSB_KEY_VERSION = 'CHROMEOS_RELEASE_VERSION'
+LSB_KEY_BUILDER_PATH = 'CHROMEOS_RELEASE_BUILDER_PATH'
+LSB_KEY_GOOGLE_RELEASE = 'GOOGLE_RELEASE'
+LSB_KEY_APPID_RELEASE = 'CHROMEOS_RELEASE_APPID'
+LSB_KEY_APPID_BOARD = 'CHROMEOS_BOARD_APPID'
+LSB_KEY_APPID_CANARY = 'CHROMEOS_CANARY_APPID'
+LSB_KEY_ARC_VERSION = 'CHROMEOS_ARC_VERSION'
+LSB_KEY_ARC_ANDROID_SDK_VERSION = 'CHROMEOS_ARC_ANDROID_SDK_VERSION'
+
+CANARY_APP_ID = '{90F229CE-83E2-4FAF-8479-E368A34938B1}'
diff --git a/utils/frozen_chromite/third_party/__init__.py b/utils/frozen_chromite/third_party/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/utils/frozen_chromite/third_party/__init__.py
diff --git a/utils/frozen_chromite/third_party/apiclient/__init__.py b/utils/frozen_chromite/third_party/apiclient/__init__.py
new file mode 100644
index 0000000..d75e7a1
--- /dev/null
+++ b/utils/frozen_chromite/third_party/apiclient/__init__.py
@@ -0,0 +1,42 @@
+"""Retain apiclient as an alias for googleapiclient."""
+
+from six import iteritems
+
+import googleapiclient
+
+try:
+  import oauth2client
+except ImportError:
+  raise RuntimeError(
+      'Previous version of google-api-python-client detected; due to a '
+      'packaging issue, we cannot perform an in-place upgrade. To repair, '
+      'remove and reinstall this package, along with oauth2client and '
+      'uritemplate. One can do this with pip via\n'
+      '  pip install -I google-api-python-client'
+  )
+
+from googleapiclient import channel
+from googleapiclient import discovery
+from googleapiclient import errors
+from googleapiclient import http
+from googleapiclient import mimeparse
+from googleapiclient import model
+from googleapiclient import sample_tools
+from googleapiclient import schema
+
+__version__ = googleapiclient.__version__
+
+_SUBMODULES = {
+    'channel': channel,
+    'discovery': discovery,
+    'errors': errors,
+    'http': http,
+    'mimeparse': mimeparse,
+    'model': model,
+    'sample_tools': sample_tools,
+    'schema': schema,
+}
+
+import sys
+for module_name, module in iteritems(_SUBMODULES):
+  sys.modules['apiclient.%s' % module_name] = module
diff --git a/utils/frozen_chromite/third_party/googleapiclient/__init__.py b/utils/frozen_chromite/third_party/googleapiclient/__init__.py
new file mode 100644
index 0000000..0753586
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__version__ = "1.5.3"
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+
+try:  # Python 2.7+
+    from logging import NullHandler
+except ImportError:
+    class NullHandler(logging.Handler):
+        def emit(self, record):
+            pass
+
+logging.getLogger(__name__).addHandler(NullHandler())
diff --git a/utils/frozen_chromite/third_party/googleapiclient/channel.py b/utils/frozen_chromite/third_party/googleapiclient/channel.py
new file mode 100644
index 0000000..a38b4ff
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/channel.py
@@ -0,0 +1,293 @@
+"""Channel notifications support.
+
+Classes and functions to support channel subscriptions and notifications
+on those channels.
+
+Notes:
+  - This code is based on experimental APIs and is subject to change.
+  - Notification does not do deduplication of notification ids, that's up to
+    the receiver.
+  - Storing the Channel between calls is up to the caller.
+
+
+Example setting up a channel:
+
+  # Create a new channel that gets notifications via webhook.
+  channel = new_webhook_channel("https://example.com/my_web_hook")
+
+  # Store the channel, keyed by 'channel.id'. Store it before calling the
+  # watch method because notifications may start arriving before the watch
+  # method returns.
+  ...
+
+  resp = service.objects().watchAll(
+    bucket="some_bucket_id", body=channel.body()).execute()
+  channel.update(resp)
+
+  # Store the channel, keyed by 'channel.id'. Store it after being updated
+  # since the resource_id value will now be correct, and that's needed to
+  # stop a subscription.
+  ...
+
+
+An example Webhook implementation using webapp2. Note that webapp2 puts
+headers in a case insensitive dictionary, as headers aren't guaranteed to
+always be upper case.
+
+  id = self.request.headers[X_GOOG_CHANNEL_ID]
+
+  # Retrieve the channel by id.
+  channel = ...
+
+  # Parse notification from the headers, including validating the id.
+  n = notification_from_headers(channel, self.request.headers)
+
+  # Do app specific stuff with the notification here.
+  if n.resource_state == 'sync':
+    # Code to handle sync state.
+  elif n.resource_state == 'exists':
+    # Code to handle the exists state.
+  elif n.resource_state == 'not_exists':
+    # Code to handle the not exists state.
+
+
+Example of unsubscribing.
+
+  service.channels().stop(channel.body())
+"""
+from __future__ import absolute_import
+
+import datetime
+import uuid
+
+from googleapiclient import errors
+import six
+
+# Oauth2client < 3 has the positional helper in 'util', >= 3 has it
+# in '_helpers'.
+try:
+  from oauth2client import util
+except ImportError:
+  from oauth2client import _helpers as util
+
+
+# The unix time epoch starts at midnight 1970.
+EPOCH = datetime.datetime.utcfromtimestamp(0)
+
+# Map the names of the parameters in the JSON channel description to
+# the parameter names we use in the Channel class.
+CHANNEL_PARAMS = {
+    'address': 'address',
+    'id': 'id',
+    'expiration': 'expiration',
+    'params': 'params',
+    'resourceId': 'resource_id',
+    'resourceUri': 'resource_uri',
+    'type': 'type',
+    'token': 'token',
+    }
+
+X_GOOG_CHANNEL_ID     = 'X-GOOG-CHANNEL-ID'
+X_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'
+X_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'
+X_GOOG_RESOURCE_URI   = 'X-GOOG-RESOURCE-URI'
+X_GOOG_RESOURCE_ID    = 'X-GOOG-RESOURCE-ID'
+
+
+def _upper_header_keys(headers):
+  new_headers = {}
+  for k, v in six.iteritems(headers):
+    new_headers[k.upper()] = v
+  return new_headers
+
+
+class Notification(object):
+  """A Notification from a Channel.
+
+  Notifications are not usually constructed directly, but are returned
+  from functions like notification_from_headers().
+
+  Attributes:
+    message_number: int, The unique id number of this notification.
+    state: str, The state of the resource being monitored.
+    uri: str, The address of the resource being monitored.
+    resource_id: str, The unique identifier of the version of the resource at
+      this event.
+  """
+  @util.positional(5)
+  def __init__(self, message_number, state, resource_uri, resource_id):
+    """Notification constructor.
+
+    Args:
+      message_number: int, The unique id number of this notification.
+      state: str, The state of the resource being monitored. Can be one
+        of "exists", "not_exists", or "sync".
+      resource_uri: str, The address of the resource being monitored.
+      resource_id: str, The identifier of the watched resource.
+    """
+    self.message_number = message_number
+    self.state = state
+    self.resource_uri = resource_uri
+    self.resource_id = resource_id
+
+
+class Channel(object):
+  """A Channel for notifications.
+
+  Usually not constructed directly, instead it is returned from helper
+  functions like new_webhook_channel().
+
+  Attributes:
+    type: str, The type of delivery mechanism used by this channel. For
+      example, 'web_hook'.
+    id: str, A UUID for the channel.
+    token: str, An arbitrary string associated with the channel that
+      is delivered to the target address with each event delivered
+      over this channel.
+    address: str, The address of the receiving entity where events are
+      delivered. Specific to the channel type.
+    expiration: int, The time, in milliseconds from the epoch, when this
+      channel will expire.
+    params: dict, A dictionary of string to string, with additional parameters
+      controlling delivery channel behavior.
+    resource_id: str, An opaque id that identifies the resource that is
+      being watched. Stable across different API versions.
+    resource_uri: str, The canonicalized ID of the watched resource.
+  """
+
+  @util.positional(5)
+  def __init__(self, type, id, token, address, expiration=None,
+               params=None, resource_id="", resource_uri=""):
+    """Create a new Channel.
+
+    In user code, this Channel constructor will not typically be called
+    manually since there are functions for creating channels for each specific
+    type with a more customized set of arguments to pass.
+
+    Args:
+      type: str, The type of delivery mechanism used by this channel. For
+        example, 'web_hook'.
+      id: str, A UUID for the channel.
+      token: str, An arbitrary string associated with the channel that
+        is delivered to the target address with each event delivered
+        over this channel.
+      address: str,  The address of the receiving entity where events are
+        delivered. Specific to the channel type.
+      expiration: int, The time, in milliseconds from the epoch, when this
+        channel will expire.
+      params: dict, A dictionary of string to string, with additional parameters
+        controlling delivery channel behavior.
+      resource_id: str, An opaque id that identifies the resource that is
+        being watched. Stable across different API versions.
+      resource_uri: str, The canonicalized ID of the watched resource.
+    """
+    self.type = type
+    self.id = id
+    self.token = token
+    self.address = address
+    self.expiration = expiration
+    self.params = params
+    self.resource_id = resource_id
+    self.resource_uri = resource_uri
+
+  def body(self):
+    """Build a body from the Channel.
+
+    Constructs a dictionary that's appropriate for passing into watch()
+    methods as the value of body argument.
+
+    Returns:
+      A dictionary representation of the channel.
+    """
+    result = {
+        'id': self.id,
+        'token': self.token,
+        'type': self.type,
+        'address': self.address
+        }
+    if self.params:
+      result['params'] = self.params
+    if self.resource_id:
+      result['resourceId'] = self.resource_id
+    if self.resource_uri:
+      result['resourceUri'] = self.resource_uri
+    if self.expiration:
+      result['expiration'] = self.expiration
+
+    return result
+
+  def update(self, resp):
+    """Update a channel with information from the response of watch().
+
+    When a request is sent to watch() a resource, the response returned
+    from the watch() request is a dictionary with updated channel information,
+    such as the resource_id, which is needed when stopping a subscription.
+
+    Args:
+      resp: dict, The response from a watch() method.
+    """
+    for json_name, param_name in six.iteritems(CHANNEL_PARAMS):
+      value = resp.get(json_name)
+      if value is not None:
+        setattr(self, param_name, value)
+
+
+def notification_from_headers(channel, headers):
+  """Parse a notification from the webhook request headers, validate
+    the notification, and return a Notification object.
+
+  Args:
+    channel: Channel, The channel that the notification is associated with.
+    headers: dict, A dictionary like object that contains the request headers
+      from the webhook HTTP request.
+
+  Returns:
+    A Notification object.
+
+  Raises:
+    errors.InvalidNotificationError if the notification is invalid.
+    ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.
+  """
+  headers = _upper_header_keys(headers)
+  channel_id = headers[X_GOOG_CHANNEL_ID]
+  if channel.id != channel_id:
+    raise errors.InvalidNotificationError(
+        'Channel id mismatch: %s != %s' % (channel.id, channel_id))
+  else:
+    message_number = int(headers[X_GOOG_MESSAGE_NUMBER])
+    state = headers[X_GOOG_RESOURCE_STATE]
+    resource_uri = headers[X_GOOG_RESOURCE_URI]
+    resource_id = headers[X_GOOG_RESOURCE_ID]
+    return Notification(message_number, state, resource_uri, resource_id)
+
+
[email protected](2)
+def new_webhook_channel(url, token=None, expiration=None, params=None):
+    """Create a new webhook Channel.
+
+    Args:
+      url: str, URL to post notifications to.
+      token: str, An arbitrary string associated with the channel that
+        is delivered to the target address with each notification delivered
+        over this channel.
+      expiration: datetime.datetime, A time in the future when the channel
+        should expire. Can also be None if the subscription should use the
+        default expiration. Note that different services may have different
+        limits on how long a subscription lasts. Check the response from the
+        watch() method to see the value the service has set for an expiration
+        time.
+      params: dict, Extra parameters to pass on channel creation. Currently
+        not used for webhook channels.
+    """
+    expiration_ms = 0
+    if expiration:
+      delta = expiration - EPOCH
+      expiration_ms = delta.microseconds/1000 + (
+          delta.seconds + delta.days*24*3600)*1000
+      if expiration_ms < 0:
+        expiration_ms = 0
+
+    return Channel('web_hook', str(uuid.uuid4()),
+                   token, url, expiration=expiration_ms,
+                   params=params)
+
diff --git a/utils/frozen_chromite/third_party/googleapiclient/discovery.py b/utils/frozen_chromite/third_party/googleapiclient/discovery.py
new file mode 100644
index 0000000..598b222
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/discovery.py
@@ -0,0 +1,1109 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for discovery based APIs.
+
+A client library for Google's discovery based APIs.
+"""
+from __future__ import absolute_import
+import six
+from six.moves import zip
+
+__author__ = '[email protected] (Joe Gregorio)'
+__all__ = [
+    'build',
+    'build_from_document',
+    'fix_method_name',
+    'key2param',
+    ]
+
+from six import BytesIO
+from six.moves import http_client
+from six.moves.urllib.parse import urlencode, urlparse, urljoin, \
+  urlunparse, parse_qsl
+
+# Standard library imports
+import copy
+try:
+  from email.generator import BytesGenerator
+except ImportError:
+  from email.generator import Generator as BytesGenerator
+from email.mime.multipart import MIMEMultipart
+from email.mime.nonmultipart import MIMENonMultipart
+import json
+import keyword
+import logging
+import mimetypes
+import os
+import re
+
+# Third-party imports
+import httplib2
+import uritemplate
+
+# Local imports
+from googleapiclient import mimeparse
+from googleapiclient.errors import HttpError
+from googleapiclient.errors import InvalidJsonError
+from googleapiclient.errors import MediaUploadSizeError
+from googleapiclient.errors import UnacceptableMimeTypeError
+from googleapiclient.errors import UnknownApiNameOrVersion
+from googleapiclient.errors import UnknownFileType
+from googleapiclient.http import BatchHttpRequest
+from googleapiclient.http import HttpMock
+from googleapiclient.http import HttpMockSequence
+from googleapiclient.http import HttpRequest
+from googleapiclient.http import MediaFileUpload
+from googleapiclient.http import MediaUpload
+from googleapiclient.model import JsonModel
+from googleapiclient.model import MediaModel
+from googleapiclient.model import RawModel
+from googleapiclient.schema import Schemas
+from oauth2client.client import GoogleCredentials
+
+# Oauth2client < 3 has the positional helper in 'util', >= 3 has it
+# in '_helpers'.
+try:
+  from oauth2client.util import _add_query_parameter
+  from oauth2client.util import positional
+except ImportError:
+  from oauth2client._helpers import _add_query_parameter
+  from oauth2client._helpers import positional
+
+
+# The client library requires a version of httplib2 that supports RETRIES.
+httplib2.RETRIES = 1
+
+logger = logging.getLogger(__name__)
+
+URITEMPLATE = re.compile('{[^}]*}')
+VARNAME = re.compile('[a-zA-Z0-9_-]+')
+DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
+                 '{api}/{apiVersion}/rest')
+V1_DISCOVERY_URI = DISCOVERY_URI
+V2_DISCOVERY_URI = ('https://{api}.googleapis.com/$discovery/rest?'
+                    'version={apiVersion}')
+DEFAULT_METHOD_DOC = 'A description of how to use this function'
+HTTP_PAYLOAD_METHODS = frozenset(['PUT', 'POST', 'PATCH'])
+_MEDIA_SIZE_BIT_SHIFTS = {'KB': 10, 'MB': 20, 'GB': 30, 'TB': 40}
+BODY_PARAMETER_DEFAULT_VALUE = {
+    'description': 'The request body.',
+    'type': 'object',
+    'required': True,
+}
+MEDIA_BODY_PARAMETER_DEFAULT_VALUE = {
+    'description': ('The filename of the media request body, or an instance '
+                    'of a MediaUpload object.'),
+    'type': 'string',
+    'required': False,
+}
+
+# Parameters accepted by the stack, but not visible via discovery.
+# TODO(dhermes): Remove 'userip' in 'v2'.
+STACK_QUERY_PARAMETERS = frozenset(['trace', 'pp', 'userip', 'strict'])
+STACK_QUERY_PARAMETER_DEFAULT_VALUE = {'type': 'string', 'location': 'query'}
+
+# Library-specific reserved words beyond Python keywords.
+RESERVED_WORDS = frozenset(['body'])
+
+# patch _write_lines to avoid munging '\r' into '\n'
+# ( https://bugs.python.org/issue18886 https://bugs.python.org/issue19003 )
+class _BytesGenerator(BytesGenerator):
+  _write_lines = BytesGenerator.write
+
+def fix_method_name(name):
+  """Fix method names to avoid reserved word conflicts.
+
+  Args:
+    name: string, method name.
+
+  Returns:
+    The name with a '_' prefixed if the name is a reserved word.
+  """
+  if keyword.iskeyword(name) or name in RESERVED_WORDS:
+    return name + '_'
+  else:
+    return name
+
+
+def key2param(key):
+  """Converts key names into parameter names.
+
+  For example, converting "max-results" -> "max_results"
+
+  Args:
+    key: string, the method key name.
+
+  Returns:
+    A safe method name based on the key name.
+  """
+  result = []
+  key = list(key)
+  if not key[0].isalpha():
+    result.append('x')
+  for c in key:
+    if c.isalnum():
+      result.append(c)
+    else:
+      result.append('_')
+
+  return ''.join(result)
+
+
+@positional(2)
+def build(serviceName,
+          version,
+          http=None,
+          discoveryServiceUrl=DISCOVERY_URI,
+          developerKey=None,
+          model=None,
+          requestBuilder=HttpRequest,
+          credentials=None,
+          cache_discovery=True,
+          cache=None):
+  """Construct a Resource for interacting with an API.
+
+  Construct a Resource object for interacting with an API. The serviceName and
+  version are the names from the Discovery service.
+
+  Args:
+    serviceName: string, name of the service.
+    version: string, the version of the service.
+    http: httplib2.Http, An instance of httplib2.Http or something that acts
+      like it that HTTP requests will be made through.
+    discoveryServiceUrl: string, a URI Template that points to the location of
+      the discovery service. It should have two parameters {api} and
+      {apiVersion} that when filled in produce an absolute URI to the discovery
+      document for that service.
+    developerKey: string, key obtained from
+      https://code.google.com/apis/console.
+    model: googleapiclient.Model, converts to and from the wire format.
+    requestBuilder: googleapiclient.http.HttpRequest, encapsulator for an HTTP
+      request.
+    credentials: oauth2client.Credentials, credentials to be used for
+      authentication.
+    cache_discovery: Boolean, whether or not to cache the discovery doc.
+    cache: googleapiclient.discovery_cache.base.CacheBase, an optional
+      cache object for the discovery documents.
+
+  Returns:
+    A Resource object with methods for interacting with the service.
+  """
+  params = {
+      'api': serviceName,
+      'apiVersion': version
+      }
+
+  if http is None:
+    http = httplib2.Http()
+
+  for discovery_url in (discoveryServiceUrl, V2_DISCOVERY_URI,):
+    requested_url = uritemplate.expand(discovery_url, params)
+
+    try:
+      content = _retrieve_discovery_doc(requested_url, http, cache_discovery,
+                                        cache)
+      return build_from_document(content, base=discovery_url, http=http,
+          developerKey=developerKey, model=model, requestBuilder=requestBuilder,
+          credentials=credentials)
+    except HttpError as e:
+      if e.resp.status == http_client.NOT_FOUND:
+        continue
+      else:
+        raise e
+
+  raise UnknownApiNameOrVersion(
+        "name: %s  version: %s" % (serviceName, version))
+
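+# Example sketch (API name, version, and method chain are illustrative only):
+# build a service object for a public API.
+#
+#   http = httplib2.Http()
+#   service = build('discovery', 'v1', http=http)
+#   apis = service.apis().list(preferred=True).execute()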
+
+def _retrieve_discovery_doc(url, http, cache_discovery, cache=None):
+  """Retrieves the discovery_doc from cache or the internet.
+
+  Args:
+    url: string, the URL of the discovery document.
+    http: httplib2.Http, An instance of httplib2.Http or something that acts
+      like it through which HTTP requests will be made.
+    cache_discovery: Boolean, whether or not to cache the discovery doc.
+    cache: googleapiclient.discovery_cache.base.Cache, an optional cache
+      object for the discovery documents.
+
+  Returns:
+    A unicode string representation of the discovery document.
+  """
+  if cache_discovery:
+    from . import discovery_cache
+    from .discovery_cache import base
+    if cache is None:
+      cache = discovery_cache.autodetect()
+    if cache:
+      content = cache.get(url)
+      if content:
+        return content
+
+  actual_url = url
+  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
+  # variable that contains the network address of the client sending the
+  # request. If it exists then add that to the request for the discovery
+  # document to avoid exceeding the quota on discovery requests.
+  if 'REMOTE_ADDR' in os.environ:
+    actual_url = _add_query_parameter(url, 'userIp', os.environ['REMOTE_ADDR'])
+  logger.info('URL being requested: GET %s', actual_url)
+
+  resp, content = http.request(actual_url)
+
+  if resp.status >= 400:
+    raise HttpError(resp, content, uri=actual_url)
+
+  try:
+    content = content.decode('utf-8')
+  except AttributeError:
+    pass
+
+  try:
+    service = json.loads(content)
+  except ValueError as e:
+    logger.error('Failed to parse as JSON: ' + content)
+    raise InvalidJsonError()
+  if cache_discovery and cache:
+    cache.set(url, content)
+  return content
+
+
+@positional(1)
+def build_from_document(
+    service,
+    base=None,
+    future=None,
+    http=None,
+    developerKey=None,
+    model=None,
+    requestBuilder=HttpRequest,
+    credentials=None):
+  """Create a Resource for interacting with an API.
+
+  Same as `build()`, but constructs the Resource object from a discovery
+  document that it is given, as opposed to retrieving one over HTTP.
+
+  Args:
+    service: string or object, the JSON discovery document describing the API.
+      The value passed in may either be the JSON string or the deserialized
+      JSON.
+    base: string, base URI for all HTTP requests, usually the discovery URI.
+      This parameter is no longer used as rootUrl and servicePath are included
+      within the discovery document. (deprecated)
+    future: string, discovery document with future capabilities (deprecated).
+    http: httplib2.Http, An instance of httplib2.Http or something that acts
+      like it that HTTP requests will be made through.
+    developerKey: string, Key for controlling API usage, generated
+      from the API Console.
+    model: Model class instance that serializes and de-serializes requests and
+      responses.
+    requestBuilder: Takes an http request and packages it up to be executed.
+    credentials: object, credentials to be used for authentication.
+
+  Returns:
+    A Resource object with methods for interacting with the service.
+  """
+
+  if http is None:
+    http = httplib2.Http()
+
+  # future is no longer used.
+  future = {}
+
+  if isinstance(service, six.string_types):
+    service = json.loads(service)
+
+  if  'rootUrl' not in service and (isinstance(http, (HttpMock,
+                                                      HttpMockSequence))):
+      logger.error("You are using HttpMock or HttpMockSequence without" +
+                   "having the service discovery doc in cache. Try calling " +
+                   "build() without mocking once first to populate the " +
+                   "cache.")
+      raise InvalidJsonError()
+
+  base = urljoin(service['rootUrl'], service['servicePath'])
+  schema = Schemas(service)
+
+  if credentials:
+    # If credentials were passed in, we could have two cases:
+    # 1. the scopes were specified, in which case the given credentials
+    #    are used for authorizing the http;
+    # 2. the scopes were not provided (meaning the Application Default
+    #    Credentials are to be used). In this case, the Application Default
+    #    Credentials are built and used instead of the original credentials.
+    #    If there are no scopes found (meaning the given service requires no
+    #    authentication), there is no authorization of the http.
+    if (isinstance(credentials, GoogleCredentials) and
+        credentials.create_scoped_required()):
+      scopes = service.get('auth', {}).get('oauth2', {}).get('scopes', {})
+      if scopes:
+        credentials = credentials.create_scoped(list(scopes.keys()))
+      else:
+        # No need to authorize the http object
+        # if the service does not require authentication.
+        credentials = None
+
+    if credentials:
+      http = credentials.authorize(http)
+
+  if model is None:
+    features = service.get('features', [])
+    model = JsonModel('dataWrapper' in features)
+  return Resource(http=http, baseUrl=base, model=model,
+                  developerKey=developerKey, requestBuilder=requestBuilder,
+                  resourceDesc=service, rootDesc=service, schema=schema)
+
+
+def _cast(value, schema_type):
+  """Convert value to a string based on JSON Schema type.
+
+  See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
+  JSON Schema.
+
+  Args:
+    value: any, the value to convert
+    schema_type: string, the type that value should be interpreted as
+
+  Returns:
+    A string representation of 'value' based on the schema_type.
+  """
+  if schema_type == 'string':
+    if type(value) == type('') or type(value) == type(u''):
+      return value
+    else:
+      return str(value)
+  elif schema_type == 'integer':
+    return str(int(value))
+  elif schema_type == 'number':
+    return str(float(value))
+  elif schema_type == 'boolean':
+    return str(bool(value)).lower()
+  else:
+    if type(value) == type('') or type(value) == type(u''):
+      return value
+    else:
+      return str(value)
+
+
+def _media_size_to_long(maxSize):
+  """Convert a string media size, such as 10GB or 3TB into an integer.
+
+  Args:
+    maxSize: string, size as a string, such as 2MB or 7GB.
+
+  Returns:
+    The size as an integer value.
+  """
+  if len(maxSize) < 2:
+    return 0
+  units = maxSize[-2:].upper()
+  bit_shift = _MEDIA_SIZE_BIT_SHIFTS.get(units)
+  if bit_shift is not None:
+    return int(maxSize[:-2]) << bit_shift
+  else:
+    return int(maxSize)
+
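+# Example sketch (hypothetical inputs): '10GB' parses to 10 << 30 bytes,
+# '512' (no unit suffix) parses to 512, and strings shorter than two
+# characters yield 0.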
+
+def _media_path_url_from_info(root_desc, path_url):
+  """Creates an absolute media path URL.
+
+  Constructed using the API root URI and service path from the discovery
+  document and the relative path for the API method.
+
+  Args:
+    root_desc: Dictionary; the entire original deserialized discovery document.
+    path_url: String; the relative URL for the API method. Relative to the API
+        root, which is specified in the discovery document.
+
+  Returns:
+    String; the absolute URI for media upload for the API method.
+  """
+  return '%(root)supload/%(service_path)s%(path)s' % {
+      'root': root_desc['rootUrl'],
+      'service_path': root_desc['servicePath'],
+      'path': path_url,
+  }
+
+
+def _fix_up_parameters(method_desc, root_desc, http_method):
+  """Updates parameters of an API method with values specific to this library.
+
+  Specifically, adds whatever global parameters are specified by the API to the
+  parameters for the individual method. Also adds parameters which don't
+  appear in the discovery document, but are available to all discovery based
+  APIs (these are listed in STACK_QUERY_PARAMETERS).
+
+  SIDE EFFECTS: This updates the parameters dictionary object in the method
+  description.
+
+  Args:
+    method_desc: Dictionary with metadata describing an API method. Value comes
+        from the dictionary of methods stored in the 'methods' key in the
+        deserialized discovery document.
+    root_desc: Dictionary; the entire original deserialized discovery document.
+    http_method: String; the HTTP method used to call the API method described
+        in method_desc.
+
+  Returns:
+    The updated Dictionary stored in the 'parameters' key of the method
+        description dictionary.
+  """
+  parameters = method_desc.setdefault('parameters', {})
+
+  # Add in the parameters common to all methods.
+  for name, description in six.iteritems(root_desc.get('parameters', {})):
+    parameters[name] = description
+
+  # Add in undocumented query parameters.
+  for name in STACK_QUERY_PARAMETERS:
+    parameters[name] = STACK_QUERY_PARAMETER_DEFAULT_VALUE.copy()
+
+  # Add 'body' (our own reserved word) to parameters if the method supports
+  # a request payload.
+  if http_method in HTTP_PAYLOAD_METHODS and 'request' in method_desc:
+    body = BODY_PARAMETER_DEFAULT_VALUE.copy()
+    body.update(method_desc['request'])
+    parameters['body'] = body
+
+  return parameters
+
+
+def _fix_up_media_upload(method_desc, root_desc, path_url, parameters):
+  """Updates parameters of API by adding 'media_body' if supported by method.
+
+  SIDE EFFECTS: If the method supports media upload and has a required body,
+  sets body to be optional (required=False) instead. Also, if there is a
+  'mediaUpload' in the method description, adds 'media_upload' key to
+  parameters.
+
+  Args:
+    method_desc: Dictionary with metadata describing an API method. Value comes
+        from the dictionary of methods stored in the 'methods' key in the
+        deserialized discovery document.
+    root_desc: Dictionary; the entire original deserialized discovery document.
+    path_url: String; the relative URL for the API method. Relative to the API
+        root, which is specified in the discovery document.
+    parameters: A dictionary describing method parameters for method described
+        in method_desc.
+
+  Returns:
+    Triple (accept, max_size, media_path_url) where:
+      - accept is a list of strings representing what content types are
+        accepted for media upload. Defaults to empty list if not in the
+        discovery document.
+      - max_size is a long representing the max size in bytes allowed for a
+        media upload. Defaults to 0L if not in the discovery document.
+      - media_path_url is a String; the absolute URI for media upload for the
+        API method. Constructed using the API root URI and service path from
+        the discovery document and the relative path for the API method. If
+        media upload is not supported, this is None.
+  """
+  media_upload = method_desc.get('mediaUpload', {})
+  accept = media_upload.get('accept', [])
+  max_size = _media_size_to_long(media_upload.get('maxSize', ''))
+  media_path_url = None
+
+  if media_upload:
+    media_path_url = _media_path_url_from_info(root_desc, path_url)
+    parameters['media_body'] = MEDIA_BODY_PARAMETER_DEFAULT_VALUE.copy()
+    if 'body' in parameters:
+      parameters['body']['required'] = False
+
+  return accept, max_size, media_path_url
+
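+# Illustrative note (not part of the upstream module): for a method whose
+# discovery entry contains a 'mediaUpload' section, the helper above adds a
+# 'media_body' parameter and demotes any required 'body' parameter to
+# optional, so callers can upload media without a metadata body.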
+
+def _fix_up_method_description(method_desc, root_desc):
+  """Updates a method description in a discovery document.
+
+  SIDE EFFECTS: Adds extra parameters, used locally by this library, to the
+  parameters dictionary in the method description.
+
+  Args:
+    method_desc: Dictionary with metadata describing an API method. Value comes
+        from the dictionary of methods stored in the 'methods' key in the
+        deserialized discovery document.
+    root_desc: Dictionary; the entire original deserialized discovery document.
+
+  Returns:
+    Tuple (path_url, http_method, method_id, accept, max_size, media_path_url)
+    where:
+      - path_url is a String; the relative URL for the API method. Relative to
+        the API root, which is specified in the discovery document.
+      - http_method is a String; the HTTP method used to call the API method
+        described in the method description.
+      - method_id is a String; the name of the RPC method associated with the
+        API method, and is in the method description in the 'id' key.
+      - accept is a list of strings representing what content types are
+        accepted for media upload. Defaults to empty list if not in the
+        discovery document.
+      - max_size is a long representing the max size in bytes allowed for a
+        media upload. Defaults to 0L if not in the discovery document.
+      - media_path_url is a String; the absolute URI for media upload for the
+        API method. Constructed using the API root URI and service path from
+        the discovery document and the relative path for the API method. If
+        media upload is not supported, this is None.
+  """
+  path_url = method_desc['path']
+  http_method = method_desc['httpMethod']
+  method_id = method_desc['id']
+
+  parameters = _fix_up_parameters(method_desc, root_desc, http_method)
+  # Order is important. `_fix_up_media_upload` needs `method_desc` to have a
+  # 'parameters' key and needs to know if there is a 'body' parameter because it
+  # also sets a 'media_body' parameter.
+  accept, max_size, media_path_url = _fix_up_media_upload(
+      method_desc, root_desc, path_url, parameters)
+
+  return path_url, http_method, method_id, accept, max_size, media_path_url
+
+
+def _urljoin(base, url):
+  """Custom urljoin replacement supporting : before / in url."""
+  # In general, it's unsafe to simply join base and url. However, for
+  # the case of discovery documents, we know:
+  #  * base will never contain params, query, or fragment
+  #  * url will never contain a scheme or net_loc.
+  # In general, this means we can safely join on /; we just need to
+  # ensure we end up with precisely one / joining base and url. The
+  # exception here is the case of media uploads, where url will be an
+  # absolute url.
+  if url.startswith('http://') or url.startswith('https://'):
+    return urljoin(base, url)
+  new_base = base if base.endswith('/') else base + '/'
+  new_url = url[1:] if url.startswith('/') else url
+  return new_base + new_url
+
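+# Illustrative examples (not part of the upstream module), assuming typical
+# discovery values:
+#   _urljoin('https://www.googleapis.com/drive/v2', 'files/{fileId}')
+#       -> 'https://www.googleapis.com/drive/v2/files/{fileId}'
+#   _urljoin('https://www.googleapis.com/drive/v2/',
+#            'https://www.googleapis.com/upload/drive/v2/files')
+#       -> 'https://www.googleapis.com/upload/drive/v2/files'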
+
+# TODO(dhermes): Convert this class to ResourceMethod and make it callable
+class ResourceMethodParameters(object):
+  """Represents the parameters associated with a method.
+
+  Attributes:
+    argmap: Map from method parameter name (string) to query parameter name
+        (string).
+    required_params: List of required parameters (represented by parameter
+        name as string).
+    repeated_params: List of repeated parameters (represented by parameter
+        name as string).
+    pattern_params: Map from method parameter name (string) to regular
+        expression (as a string). If the pattern is set for a parameter, the
+        value for that parameter must match the regular expression.
+    query_params: List of parameters (represented by parameter name as string)
+        that will be used in the query string.
+    path_params: Set of parameters (represented by parameter name as string)
+        that will be used in the base URL path.
+    param_types: Map from method parameter name (string) to parameter type. Type
+        can be any valid JSON schema type; valid values are 'any', 'array',
+        'boolean', 'integer', 'number', 'object', or 'string'. Reference:
+        http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1
+    enum_params: Map from method parameter name (string) to list of strings,
+       where each list of strings is the list of acceptable enum values.
+  """
+
+  def __init__(self, method_desc):
+    """Constructor for ResourceMethodParameters.
+
+    Sets default values and defers to set_parameters to populate.
+
+    Args:
+      method_desc: Dictionary with metadata describing an API method. Value
+          comes from the dictionary of methods stored in the 'methods' key in
+          the deserialized discovery document.
+    """
+    self.argmap = {}
+    self.required_params = []
+    self.repeated_params = []
+    self.pattern_params = {}
+    self.query_params = []
+    # TODO(dhermes): Change path_params to a list if the extra URITEMPLATE
+    #                parsing is gotten rid of.
+    self.path_params = set()
+    self.param_types = {}
+    self.enum_params = {}
+
+    self.set_parameters(method_desc)
+
+  def set_parameters(self, method_desc):
+    """Populates maps and lists based on method description.
+
+    Iterates through each parameter for the method and parses the values from
+    the parameter dictionary.
+
+    Args:
+      method_desc: Dictionary with metadata describing an API method. Value
+          comes from the dictionary of methods stored in the 'methods' key in
+          the deserialized discovery document.
+    """
+    for arg, desc in six.iteritems(method_desc.get('parameters', {})):
+      param = key2param(arg)
+      self.argmap[param] = arg
+
+      if desc.get('pattern'):
+        self.pattern_params[param] = desc['pattern']
+      if desc.get('enum'):
+        self.enum_params[param] = desc['enum']
+      if desc.get('required'):
+        self.required_params.append(param)
+      if desc.get('repeated'):
+        self.repeated_params.append(param)
+      if desc.get('location') == 'query':
+        self.query_params.append(param)
+      if desc.get('location') == 'path':
+        self.path_params.add(param)
+      self.param_types[param] = desc.get('type', 'string')
+
+    # TODO(dhermes): Determine if this is still necessary. Discovery based APIs
+    #                should have all path parameters already marked with
+    #                'location: path'.
+    for match in URITEMPLATE.finditer(method_desc['path']):
+      for namematch in VARNAME.finditer(match.group(0)):
+        name = key2param(namematch.group(0))
+        self.path_params.add(name)
+        if name in self.query_params:
+          self.query_params.remove(name)
+
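+# Illustrative note (not part of the upstream module): key2param() turns a
+# wire parameter name such as 'max-results' into the Python-safe keyword
+# 'max_results', and argmap maps it back, so callers pass max_results=10
+# while the query string still uses 'max-results'.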
+
+def createMethod(methodName, methodDesc, rootDesc, schema):
+  """Creates a method for attaching to a Resource.
+
+  Args:
+    methodName: string, name of the method to use.
+    methodDesc: object, fragment of deserialized discovery document that
+      describes the method.
+    rootDesc: object, the entire deserialized discovery document.
+    schema: object, mapping of schema names to schema descriptions.
+  """
+  methodName = fix_method_name(methodName)
+  (pathUrl, httpMethod, methodId, accept,
+   maxSize, mediaPathUrl) = _fix_up_method_description(methodDesc, rootDesc)
+
+  parameters = ResourceMethodParameters(methodDesc)
+
+  def method(self, **kwargs):
+    # Don't bother with doc string, it will be over-written by createMethod.
+
+    for name in six.iterkeys(kwargs):
+      if name not in parameters.argmap:
+        raise TypeError('Got an unexpected keyword argument "%s"' % name)
+
+    # Remove args that have a value of None.
+    keys = list(kwargs.keys())
+    for name in keys:
+      if kwargs[name] is None:
+        del kwargs[name]
+
+    for name in parameters.required_params:
+      if name not in kwargs:
+        raise TypeError('Missing required parameter "%s"' % name)
+
+    for name, regex in six.iteritems(parameters.pattern_params):
+      if name in kwargs:
+        if isinstance(kwargs[name], six.string_types):
+          pvalues = [kwargs[name]]
+        else:
+          pvalues = kwargs[name]
+        for pvalue in pvalues:
+          if re.match(regex, pvalue) is None:
+            raise TypeError(
+                'Parameter "%s" value "%s" does not match the pattern "%s"' %
+                (name, pvalue, regex))
+
+    for name, enums in six.iteritems(parameters.enum_params):
+      if name in kwargs:
+        # We need to handle the case of a repeated enum
+        # name differently, since we want to handle both
+        # arg='value' and arg=['value1', 'value2']
+        if (name in parameters.repeated_params and
+            not isinstance(kwargs[name], six.string_types)):
+          values = kwargs[name]
+        else:
+          values = [kwargs[name]]
+        for value in values:
+          if value not in enums:
+            raise TypeError(
+                'Parameter "%s" value "%s" is not an allowed value in "%s"' %
+                (name, value, str(enums)))
+
+    actual_query_params = {}
+    actual_path_params = {}
+    for key, value in six.iteritems(kwargs):
+      to_type = parameters.param_types.get(key, 'string')
+      # For repeated parameters we cast each member of the list.
+      if key in parameters.repeated_params and type(value) == type([]):
+        cast_value = [_cast(x, to_type) for x in value]
+      else:
+        cast_value = _cast(value, to_type)
+      if key in parameters.query_params:
+        actual_query_params[parameters.argmap[key]] = cast_value
+      if key in parameters.path_params:
+        actual_path_params[parameters.argmap[key]] = cast_value
+    body_value = kwargs.get('body', None)
+    media_filename = kwargs.get('media_body', None)
+
+    if self._developerKey:
+      actual_query_params['key'] = self._developerKey
+
+    model = self._model
+    if methodName.endswith('_media'):
+      model = MediaModel()
+    elif 'response' not in methodDesc:
+      model = RawModel()
+
+    headers = {}
+    headers, params, query, body = model.request(headers,
+        actual_path_params, actual_query_params, body_value)
+
+    expanded_url = uritemplate.expand(pathUrl, params)
+    url = _urljoin(self._baseUrl, expanded_url + query)
+
+    resumable = None
+    multipart_boundary = ''
+
+    if media_filename:
+      # Ensure we end up with a valid MediaUpload object.
+      if isinstance(media_filename, six.string_types):
+        (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
+        if media_mime_type is None:
+          raise UnknownFileType(media_filename)
+        if not mimeparse.best_match([media_mime_type], ','.join(accept)):
+          raise UnacceptableMimeTypeError(media_mime_type)
+        media_upload = MediaFileUpload(media_filename,
+                                       mimetype=media_mime_type)
+      elif isinstance(media_filename, MediaUpload):
+        media_upload = media_filename
+      else:
+        raise TypeError('media_filename must be str or MediaUpload.')
+
+      # Check the maxSize
+      if media_upload.size() is not None and media_upload.size() > maxSize > 0:
+        raise MediaUploadSizeError("Media larger than: %s" % maxSize)
+
+      # Use the media path uri for media uploads
+      expanded_url = uritemplate.expand(mediaPathUrl, params)
+      url = _urljoin(self._baseUrl, expanded_url + query)
+      if media_upload.resumable():
+        url = _add_query_parameter(url, 'uploadType', 'resumable')
+
+      if media_upload.resumable():
+        # This is all we need to do for resumable, if the body exists it gets
+        # sent in the first request, otherwise an empty body is sent.
+        resumable = media_upload
+      else:
+        # A non-resumable upload
+        if body is None:
+          # This is a simple media upload
+          headers['content-type'] = media_upload.mimetype()
+          body = media_upload.getbytes(0, media_upload.size())
+          url = _add_query_parameter(url, 'uploadType', 'media')
+        else:
+          # This is a multipart/related upload.
+          msgRoot = MIMEMultipart('related')
+          # msgRoot should not write out its own headers
+          setattr(msgRoot, '_write_headers', lambda self: None)
+
+          # attach the body as one part
+          msg = MIMENonMultipart(*headers['content-type'].split('/'))
+          msg.set_payload(body)
+          msgRoot.attach(msg)
+
+          # attach the media as the second part
+          msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
+          msg['Content-Transfer-Encoding'] = 'binary'
+
+          payload = media_upload.getbytes(0, media_upload.size())
+          msg.set_payload(payload)
+          msgRoot.attach(msg)
+          # encode the body: note that we can't use `as_string`, because
+          # it plays games with `From ` lines.
+          fp = BytesIO()
+          g = _BytesGenerator(fp, mangle_from_=False)
+          g.flatten(msgRoot, unixfrom=False)
+          body = fp.getvalue()
+
+          multipart_boundary = msgRoot.get_boundary()
+          headers['content-type'] = ('multipart/related; '
+                                     'boundary="%s"') % multipart_boundary
+          url = _add_query_parameter(url, 'uploadType', 'multipart')
+
+    logger.info('URL being requested: %s %s' % (httpMethod, url))
+    return self._requestBuilder(self._http,
+                                model.response,
+                                url,
+                                method=httpMethod,
+                                body=body,
+                                headers=headers,
+                                methodId=methodId,
+                                resumable=resumable)
+
+  docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
+  if len(parameters.argmap) > 0:
+    docs.append('Args:\n')
+
+  # Skip undocumented params and params common to all methods.
+  skip_parameters = list(rootDesc.get('parameters', {}).keys())
+  skip_parameters.extend(STACK_QUERY_PARAMETERS)
+
+  all_args = list(parameters.argmap.keys())
+  args_ordered = [key2param(s) for s in methodDesc.get('parameterOrder', [])]
+
+  # Move body to the front of the line.
+  if 'body' in all_args:
+    args_ordered.append('body')
+
+  for name in all_args:
+    if name not in args_ordered:
+      args_ordered.append(name)
+
+  for arg in args_ordered:
+    if arg in skip_parameters:
+      continue
+
+    repeated = ''
+    if arg in parameters.repeated_params:
+      repeated = ' (repeated)'
+    required = ''
+    if arg in parameters.required_params:
+      required = ' (required)'
+    paramdesc = methodDesc['parameters'][parameters.argmap[arg]]
+    paramdoc = paramdesc.get('description', 'A parameter')
+    if '$ref' in paramdesc:
+      docs.append(
+          ('  %s: object, %s%s%s\n    The object takes the'
+          ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
+            schema.prettyPrintByName(paramdesc['$ref'])))
+    else:
+      paramtype = paramdesc.get('type', 'string')
+      docs.append('  %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
+                                          repeated))
+    enum = paramdesc.get('enum', [])
+    enumDesc = paramdesc.get('enumDescriptions', [])
+    if enum and enumDesc:
+      docs.append('    Allowed values\n')
+      for (name, desc) in zip(enum, enumDesc):
+        docs.append('      %s - %s\n' % (name, desc))
+  if 'response' in methodDesc:
+    if methodName.endswith('_media'):
+      docs.append('\nReturns:\n  The media object as a string.\n\n    ')
+    else:
+      docs.append('\nReturns:\n  An object of the form:\n\n    ')
+      docs.append(schema.prettyPrintSchema(methodDesc['response']))
+
+  setattr(method, '__doc__', ''.join(docs))
+  return (methodName, method)
+
+
+def createNextMethod(methodName):
+  """Creates any _next methods for attaching to a Resource.
+
+  The _next methods allow for easy iteration through list() responses.
+
+  Args:
+    methodName: string, name of the method to use.
+  """
+  methodName = fix_method_name(methodName)
+
+  def methodNext(self, previous_request, previous_response):
+    """Retrieves the next page of results.
+
+Args:
+  previous_request: The request for the previous page. (required)
+  previous_response: The response from the request for the previous page. (required)
+
+Returns:
+  A request object that you can call 'execute()' on to request the next
+  page. Returns None if there are no more items in the collection.
+    """
+    # Retrieve nextPageToken from previous_response
+    # Use as pageToken in previous_request to create new request.
+
+    if 'nextPageToken' not in previous_response or not previous_response['nextPageToken']:
+      return None
+
+    request = copy.copy(previous_request)
+
+    pageToken = previous_response['nextPageToken']
+    parsed = list(urlparse(request.uri))
+    q = parse_qsl(parsed[4])
+
+    # Find and remove old 'pageToken' value from URI
+    newq = [(key, value) for (key, value) in q if key != 'pageToken']
+    newq.append(('pageToken', pageToken))
+    parsed[4] = urlencode(newq)
+    uri = urlunparse(parsed)
+
+    request.uri = uri
+
+    logger.info('URL being requested: %s %s' % (methodName, uri))
+
+    return request
+
+  return (methodName, methodNext)
+
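+# A minimal usage sketch for the generated *_next methods (hypothetical
+# 'service' and 'files' names, not part of the upstream module):
+#
+#   request = service.files().list()
+#   while request is not None:
+#     response = request.execute()
+#     ...  # consume the current page of results
+#     request = service.files().list_next(request, response)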
+
+class Resource(object):
+  """A class for interacting with a resource."""
+
+  def __init__(self, http, baseUrl, model, requestBuilder, developerKey,
+               resourceDesc, rootDesc, schema):
+    """Build a Resource from the API description.
+
+    Args:
+      http: httplib2.Http, Object to make http requests with.
+      baseUrl: string, base URL for the API. All requests are relative to this
+          URI.
+      model: googleapiclient.Model, converts to and from the wire format.
+      requestBuilder: class or callable that instantiates an
+          googleapiclient.HttpRequest object.
+      developerKey: string, key obtained from
+          https://code.google.com/apis/console
+      resourceDesc: object, section of deserialized discovery document that
+          describes a resource. Note that the top level discovery document
+          is considered a resource.
+      rootDesc: object, the entire deserialized discovery document.
+      schema: object, mapping of schema names to schema descriptions.
+    """
+    self._dynamic_attrs = []
+
+    self._http = http
+    self._baseUrl = baseUrl
+    self._model = model
+    self._developerKey = developerKey
+    self._requestBuilder = requestBuilder
+    self._resourceDesc = resourceDesc
+    self._rootDesc = rootDesc
+    self._schema = schema
+
+    self._set_service_methods()
+
+  def _set_dynamic_attr(self, attr_name, value):
+    """Sets an instance attribute and tracks it in a list of dynamic attributes.
+
+    Args:
+      attr_name: string; The name of the attribute to be set
+      value: The value being set on the object and tracked in the dynamic cache.
+    """
+    self._dynamic_attrs.append(attr_name)
+    self.__dict__[attr_name] = value
+
+  def __getstate__(self):
+    """Trim the state down to something that can be pickled.
+
+    Uses the fact that the instance variable _dynamic_attrs holds attrs that
+    will be wiped and restored on pickle serialization.
+    """
+    state_dict = copy.copy(self.__dict__)
+    for dynamic_attr in self._dynamic_attrs:
+      del state_dict[dynamic_attr]
+    del state_dict['_dynamic_attrs']
+    return state_dict
+
+  def __setstate__(self, state):
+    """Reconstitute the state of the object from being pickled.
+
+    Uses the fact that the instance variable _dynamic_attrs holds attrs that
+    will be wiped and restored on pickle serialization.
+    """
+    self.__dict__.update(state)
+    self._dynamic_attrs = []
+    self._set_service_methods()
+
+  def _set_service_methods(self):
+    self._add_basic_methods(self._resourceDesc, self._rootDesc, self._schema)
+    self._add_nested_resources(self._resourceDesc, self._rootDesc, self._schema)
+    self._add_next_methods(self._resourceDesc, self._schema)
+
+  def _add_basic_methods(self, resourceDesc, rootDesc, schema):
+    # If this is the root Resource, add a new_batch_http_request() method.
+    if resourceDesc == rootDesc:
+      batch_uri = '%s%s' % (
+        rootDesc['rootUrl'], rootDesc.get('batchPath', 'batch'))
+      def new_batch_http_request(callback=None):
+        """Create a BatchHttpRequest object based on the discovery document.
+
+        Args:
+          callback: callable, A callback to be called for each response, of the
+            form callback(id, response, exception). The first parameter is the
+            request id, and the second is the deserialized response object. The
+            third is an apiclient.errors.HttpError exception object if an HTTP
+            error occurred while processing the request, or None if no error
+            occurred.
+
+        Returns:
+          A BatchHttpRequest object based on the discovery document.
+        """
+        return BatchHttpRequest(callback=callback, batch_uri=batch_uri)
+      self._set_dynamic_attr('new_batch_http_request', new_batch_http_request)
+
+    # Add basic methods to Resource
+    if 'methods' in resourceDesc:
+      for methodName, methodDesc in six.iteritems(resourceDesc['methods']):
+        fixedMethodName, method = createMethod(
+            methodName, methodDesc, rootDesc, schema)
+        self._set_dynamic_attr(fixedMethodName,
+                               method.__get__(self, self.__class__))
+        # Add in _media methods. The functionality of the attached method will
+        # change when it sees that the method name ends in _media.
+        if methodDesc.get('supportsMediaDownload', False):
+          fixedMethodName, method = createMethod(
+              methodName + '_media', methodDesc, rootDesc, schema)
+          self._set_dynamic_attr(fixedMethodName,
+                                 method.__get__(self, self.__class__))
+
+  def _add_nested_resources(self, resourceDesc, rootDesc, schema):
+    # Add in nested resources
+    if 'resources' in resourceDesc:
+
+      def createResourceMethod(methodName, methodDesc):
+        """Create a method on the Resource to access a nested Resource.
+
+        Args:
+          methodName: string, name of the method to use.
+          methodDesc: object, fragment of deserialized discovery document that
+            describes the method.
+        """
+        methodName = fix_method_name(methodName)
+
+        def methodResource(self):
+          return Resource(http=self._http, baseUrl=self._baseUrl,
+                          model=self._model, developerKey=self._developerKey,
+                          requestBuilder=self._requestBuilder,
+                          resourceDesc=methodDesc, rootDesc=rootDesc,
+                          schema=schema)
+
+        setattr(methodResource, '__doc__', 'A collection resource.')
+        setattr(methodResource, '__is_resource__', True)
+
+        return (methodName, methodResource)
+
+      for methodName, methodDesc in six.iteritems(resourceDesc['resources']):
+        fixedMethodName, method = createResourceMethod(methodName, methodDesc)
+        self._set_dynamic_attr(fixedMethodName,
+                               method.__get__(self, self.__class__))
+
+  def _add_next_methods(self, resourceDesc, schema):
+    # Add _next() methods
+    # Look for response bodies in schema that contain nextPageToken, and methods
+    # that take a pageToken parameter.
+    if 'methods' in resourceDesc:
+      for methodName, methodDesc in six.iteritems(resourceDesc['methods']):
+        if 'response' in methodDesc:
+          responseSchema = methodDesc['response']
+          if '$ref' in responseSchema:
+            responseSchema = schema.get(responseSchema['$ref'])
+          hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
+                                                                   {})
+          hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
+          if hasNextPageToken and hasPageToken:
+            fixedMethodName, method = createNextMethod(methodName + '_next')
+            self._set_dynamic_attr(fixedMethodName,
+                                   method.__get__(self, self.__class__))
diff --git a/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/__init__.py b/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/__init__.py
new file mode 100644
index 0000000..f86a06d
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Caching utility for the discovery document."""
+
+from __future__ import absolute_import
+
+import logging
+import datetime
+
+
+LOGGER = logging.getLogger(__name__)
+
+DISCOVERY_DOC_MAX_AGE = 60 * 60 * 24  # 1 day
+
+
+def autodetect():
+  """Detects an appropriate cache module and returns it.
+
+  Returns:
+    googleapiclient.discovery_cache.base.Cache, a cache object which
+    is auto detected, or None if no cache object is available.
+  """
+  try:
+    from google.appengine.api import memcache
+    from . import appengine_memcache
+    return appengine_memcache.cache
+  except Exception:
+    try:
+      from . import file_cache
+      return file_cache.cache
+    except Exception as e:
+      LOGGER.warning(e, exc_info=True)
+      return None
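+
+# Assumed call-site sketch (not part of this module): the discovery builder
+# can use the object returned by autodetect() roughly as follows:
+#
+#   cache = autodetect()
+#   if cache is not None:
+#     content = cache.get(url)    # reuse a cached discovery document
+#     ...
+#     cache.set(url, content)     # store a freshly fetched document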
diff --git a/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/appengine_memcache.py b/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/appengine_memcache.py
new file mode 100644
index 0000000..7e43e66
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/appengine_memcache.py
@@ -0,0 +1,55 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""App Engine memcache based cache for the discovery document."""
+
+import logging
+
+# This is only an optional dependency because we only import this
+# module when google.appengine.api.memcache is available.
+from google.appengine.api import memcache
+
+from . import base
+from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
+
+
+LOGGER = logging.getLogger(__name__)
+
+NAMESPACE = 'google-api-client'
+
+
+class Cache(base.Cache):
+  """A cache with app engine memcache API."""
+
+  def __init__(self, max_age):
+      """Constructor.
+
+      Args:
+        max_age: Cache expiration in seconds.
+      """
+      self._max_age = max_age
+
+  def get(self, url):
+    try:
+      return memcache.get(url, namespace=NAMESPACE)
+    except Exception as e:
+      LOGGER.warning(e, exc_info=True)
+
+  def set(self, url, content):
+    try:
+      memcache.set(url, content, time=int(self._max_age), namespace=NAMESPACE)
+    except Exception as e:
+      LOGGER.warning(e, exc_info=True)
+
+cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
diff --git a/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/base.py b/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/base.py
new file mode 100644
index 0000000..00e466d
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/base.py
@@ -0,0 +1,45 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""An abstract class for caching the discovery document."""
+
+import abc
+
+
+class Cache(object):
+  """A base abstract cache class."""
+  __metaclass__ = abc.ABCMeta
+
+  @abc.abstractmethod
+  def get(self, url):
+    """Gets the content from the memcache with a given key.
+
+    Args:
+      url: string, the key for the cache.
+
+    Returns:
+      object, the value in the cache for the given key, or None if the key is
+      not in the cache.
+    """
+    raise NotImplementedError()
+
+  @abc.abstractmethod
+  def set(self, url, content):
+    """Sets the given key and content in the cache.
+
+    Args:
+      url: string, the key for the cache.
+      content: string, the discovery document.
+    """
+    raise NotImplementedError()
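+
+
+# A minimal in-memory sketch of the interface above (illustrative only, not
+# part of the library):
+#
+#   class DictCache(Cache):
+#     def __init__(self):
+#       self._store = {}
+#
+#     def get(self, url):
+#       return self._store.get(url)
+#
+#     def set(self, url, content):
+#       self._store[url] = content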
diff --git a/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/file_cache.py b/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/file_cache.py
new file mode 100644
index 0000000..31434db
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/discovery_cache/file_cache.py
@@ -0,0 +1,136 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""File based cache for the discovery document.
+
+The cache is stored in a single file so that multiple processes can
+share the same cache. It locks the file whenever accessing it. If the
+cache content is corrupted, it is reinitialized with an empty cache.
+"""
+
+from __future__ import division
+
+import datetime
+import json
+import logging
+import os
+import tempfile
+import threading
+
+try:
+  from oauth2client.contrib.locked_file import LockedFile
+except ImportError:
+  # oauth2client < 2.0.0
+  from oauth2client.locked_file import LockedFile
+
+from . import base
+from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
+
+LOGGER = logging.getLogger(__name__)
+
+FILENAME = 'google-api-python-client-discovery-doc.cache'
+EPOCH = datetime.datetime.utcfromtimestamp(0)
+
+
+def _to_timestamp(date):
+  try:
+    return (date - EPOCH).total_seconds()
+  except AttributeError:
+    # The following is the equivalent of total_seconds() in Python2.6.
+    # See also: https://docs.python.org/2/library/datetime.html
+    delta = date - EPOCH
+    return ((delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
+             * 10**6) / 10**6)
+
+
+def _read_or_initialize_cache(f):
+  f.file_handle().seek(0)
+  try:
+    cache = json.load(f.file_handle())
+  except Exception:
+    # This means the file is being opened for the first time, or the cache
+    # is corrupted, so initialize the file with an empty dict.
+    cache = {}
+    f.file_handle().truncate(0)
+    f.file_handle().seek(0)
+    json.dump(cache, f.file_handle())
+  return cache
+
+
+class Cache(base.Cache):
+  """A file based cache for the discovery documents."""
+
+  def __init__(self, max_age):
+      """Constructor.
+
+      Args:
+        max_age: Cache expiration in seconds.
+      """
+      self._max_age = max_age
+      self._file = os.path.join(tempfile.gettempdir(), FILENAME)
+      f = LockedFile(self._file, 'a+', 'r')
+      try:
+        f.open_and_lock()
+        if f.is_locked():
+          _read_or_initialize_cache(f)
+        # If we cannot obtain the lock, another process or thread must
+        # have initialized the file.
+      except Exception as e:
+        LOGGER.warning(e, exc_info=True)
+      finally:
+        f.unlock_and_close()
+
+  def get(self, url):
+    f = LockedFile(self._file, 'r+', 'r')
+    try:
+      f.open_and_lock()
+      if f.is_locked():
+        cache = _read_or_initialize_cache(f)
+        if url in cache:
+          content, t = cache.get(url, (None, 0))
+          if _to_timestamp(datetime.datetime.now()) < t + self._max_age:
+            return content
+        return None
+      else:
+        LOGGER.debug('Could not obtain a lock for the cache file.')
+        return None
+    except Exception as e:
+      LOGGER.warning(e, exc_info=True)
+    finally:
+      f.unlock_and_close()
+
+  def set(self, url, content):
+    f = LockedFile(self._file, 'r+', 'r')
+    try:
+      f.open_and_lock()
+      if f.is_locked():
+        cache = _read_or_initialize_cache(f)
+        cache[url] = (content, _to_timestamp(datetime.datetime.now()))
+        # Remove stale cache.
+        for k, (_, timestamp) in list(cache.items()):
+          if _to_timestamp(datetime.datetime.now()) >= timestamp + self._max_age:
+            del cache[k]
+        f.file_handle().truncate(0)
+        f.file_handle().seek(0)
+        json.dump(cache, f.file_handle())
+      else:
+        LOGGER.debug('Could not obtain a lock for the cache file.')
+    except Exception as e:
+      LOGGER.warning(e, exc_info=True)
+    finally:
+      f.unlock_and_close()
+
+
+cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
diff --git a/utils/frozen_chromite/third_party/googleapiclient/errors.py b/utils/frozen_chromite/third_party/googleapiclient/errors.py
new file mode 100644
index 0000000..1b79d2f
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/errors.py
@@ -0,0 +1,146 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Errors for the library.
+
+All exceptions defined by the library
+should be defined in this file.
+"""
+from __future__ import absolute_import
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+import json
+
+# Oauth2client < 3 has the positional helper in 'util', >= 3 has it
+# in '_helpers'.
+try:
+  from oauth2client import util
+except ImportError:
+  from oauth2client import _helpers as util
+
+
+class Error(Exception):
+  """Base error for this module."""
+  pass
+
+
+class HttpError(Error):
+  """HTTP data was invalid or unexpected."""
+
+  @util.positional(3)
+  def __init__(self, resp, content, uri=None):
+    self.resp = resp
+    if not isinstance(content, bytes):
+        raise TypeError("HTTP content should be bytes")
+    self.content = content
+    self.uri = uri
+
+  def _get_reason(self):
+    """Calculate the reason for the error from the response content."""
+    reason = self.resp.reason
+    try:
+      data = json.loads(self.content.decode('utf-8'))
+      reason = data['error']['message']
+    except (ValueError, KeyError):
+      pass
+    if reason is None:
+      reason = ''
+    return reason
+
+  def __repr__(self):
+    if self.uri:
+      return '<HttpError %s when requesting %s returned "%s">' % (
+          self.resp.status, self.uri, self._get_reason().strip())
+    else:
+      return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
+
+  __str__ = __repr__
+
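+# Illustrative handling sketch (hypothetical request object, not part of the
+# upstream module):
+#
+#   try:
+#     response = request.execute()
+#   except HttpError as e:
+#     if e.resp.status == 404:
+#       ...  # treat missing resources specially
+#     else:
+#       raise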
+
+class InvalidJsonError(Error):
+  """The JSON returned could not be parsed."""
+  pass
+
+
+class UnknownFileType(Error):
+  """File type unknown or unexpected."""
+  pass
+
+
+class UnknownLinkType(Error):
+  """Link type unknown or unexpected."""
+  pass
+
+
+class UnknownApiNameOrVersion(Error):
+  """No API with that name and version exists."""
+  pass
+
+
+class UnacceptableMimeTypeError(Error):
+  """That is an unacceptable mimetype for this operation."""
+  pass
+
+
+class MediaUploadSizeError(Error):
+  """Media is larger than the method can accept."""
+  pass
+
+
+class ResumableUploadError(HttpError):
+  """Error occured during resumable upload."""
+  pass
+
+
+class InvalidChunkSizeError(Error):
+  """The given chunksize is not valid."""
+  pass
+
+class InvalidNotificationError(Error):
+  """The channel Notification is invalid."""
+  pass
+
+class BatchError(HttpError):
+  """Error occured during batch operations."""
+
+  @util.positional(2)
+  def __init__(self, reason, resp=None, content=None):
+    self.resp = resp
+    self.content = content
+    self.reason = reason
+
+  def __repr__(self):
+      # self.resp may be None when BatchError is raised with only a reason.
+      if self.resp is None or getattr(self.resp, 'status', None) is None:
+        return '<BatchError "%s">' % self.reason
+      return '<BatchError %s "%s">' % (self.resp.status, self.reason)
+
+  __str__ = __repr__
+
+
+class UnexpectedMethodError(Error):
+  """Exception raised by RequestMockBuilder on unexpected calls."""
+
+  @util.positional(1)
+  def __init__(self, methodId=None):
+    """Constructor for an UnexpectedMethodError."""
+    super(UnexpectedMethodError, self).__init__(
+        'Received unexpected call %s' % methodId)
+
+
+class UnexpectedBodyError(Error):
+  """Exception raised by RequestMockBuilder on unexpected bodies."""
+
+  def __init__(self, expected, provided):
+    """Constructor for an UnexpectedMethodError."""
+    super(UnexpectedBodyError, self).__init__(
+        'Expected: [%s] - Provided: [%s]' % (expected, provided))
diff --git a/utils/frozen_chromite/third_party/googleapiclient/http.py b/utils/frozen_chromite/third_party/googleapiclient/http.py
new file mode 100644
index 0000000..34f1081
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/http.py
@@ -0,0 +1,1730 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Classes to encapsulate a single HTTP request.
+
+The classes implement a command pattern, with every
+object supporting an execute() method that does the
+actual HTTP request.
+"""
+from __future__ import absolute_import
+import six
+from six.moves import http_client
+from six.moves import range
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+from six import BytesIO, StringIO
+from six.moves.urllib.parse import urlparse, urlunparse, quote, unquote
+
+import base64
+import copy
+import gzip
+import httplib2
+import json
+import logging
+import mimetypes
+import os
+import random
+import socket
+import sys
+import time
+import uuid
+
+# TODO(issue 221): Remove this conditional import.
+try:
+  import ssl
+except ImportError:
+  _ssl_SSLError = object()
+else:
+  _ssl_SSLError = ssl.SSLError
+
+from email.generator import Generator
+from email.mime.multipart import MIMEMultipart
+from email.mime.nonmultipart import MIMENonMultipart
+from email.parser import FeedParser
+
+# Oauth2client < 3 has the positional helper in 'util', >= 3 has it
+# in '_helpers'.
+try:
+  from oauth2client import util
+except ImportError:
+  from oauth2client import _helpers as util
+
+from googleapiclient import mimeparse
+from googleapiclient.errors import BatchError
+from googleapiclient.errors import HttpError
+from googleapiclient.errors import InvalidChunkSizeError
+from googleapiclient.errors import ResumableUploadError
+from googleapiclient.errors import UnexpectedBodyError
+from googleapiclient.errors import UnexpectedMethodError
+from googleapiclient.model import JsonModel
+
+
+LOGGER = logging.getLogger(__name__)
+
+DEFAULT_CHUNK_SIZE = 512*1024
+
+MAX_URI_LENGTH = 2048
+
+_TOO_MANY_REQUESTS = 429
+
+
+def _should_retry_response(resp_status, content):
+  """Determines whether a response should be retried.
+
+  Args:
+    resp_status: The response status received.
+    content: The response content body.
+
+  Returns:
+    True if the response should be retried, otherwise False.
+  """
+  # Retry on 5xx errors.
+  if resp_status >= 500:
+    return True
+
+  # Retry on 429 errors.
+  if resp_status == _TOO_MANY_REQUESTS:
+    return True
+
+  # For 403 errors, we have to check for the `reason` in the response to
+  # determine if we should retry.
+  if resp_status == six.moves.http_client.FORBIDDEN:
+    # If there's no details about the 403 type, don't retry.
+    if not content:
+      return False
+
+    # Content is in JSON format.
+    try:
+      data = json.loads(content.decode('utf-8'))
+      reason = data['error']['errors'][0]['reason']
+    except (UnicodeDecodeError, ValueError, KeyError):
+      LOGGER.warning('Invalid JSON content from response: %s', content)
+      return False
+
+    LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason)
+
+    # Only retry on rate limit related failures.
+    if reason in ('userRateLimitExceeded', 'rateLimitExceeded', ):
+      return True
+
+  # Everything else is a success or non-retriable so break.
+  return False
+
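+# For example (illustrative, not part of the upstream module):
+#   _should_retry_response(503, b'') -> True   (5xx)
+#   _should_retry_response(429, b'') -> True   (rate limited)
+#   _should_retry_response(404, b'') -> False  (not retriable)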
+
+def _retry_request(http, num_retries, req_type, sleep, rand, uri, method, *args,
+                   **kwargs):
+  """Retries an HTTP request multiple times while handling errors.
+
+  If after all retries the request still fails, last error is either returned as
+  return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).
+
+  Args:
+    http: Http object to be used to execute request.
+    num_retries: Maximum number of retries.
+    req_type: Type of the request (used for logging retries).
+    sleep, rand: Functions to sleep for random time between retries.
+    uri: URI to be requested.
+    method: HTTP method to be used.
+    args, kwargs: Additional arguments passed to http.request.
+
+  Returns:
+    resp, content - Response from the http request (may be HTTP 5xx).
+  """
+  resp = None
+  content = None
+  for retry_num in range(num_retries + 1):
+    if retry_num > 0:
+      # Sleep before retrying.
+      sleep_time = rand() * 2 ** retry_num
+      LOGGER.warning(
+          'Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s',
+          sleep_time, retry_num, num_retries, req_type, method, uri,
+          resp.status if resp else exception)
+      sleep(sleep_time)
+
+    try:
+      exception = None
+      resp, content = http.request(uri, method, *args, **kwargs)
+    # Retry on SSL errors and socket timeout errors.
+    except _ssl_SSLError as ssl_error:
+      exception = ssl_error
+    except socket.error as socket_error:
+      # errno's contents differ by platform, so we have to match by name.
+      if socket.errno.errorcode.get(socket_error.errno) not in (
+          'WSAETIMEDOUT', 'ETIMEDOUT', 'EPIPE', 'ECONNABORTED', ):
+        raise
+      exception = socket_error
+
+    if exception:
+      if retry_num == num_retries:
+        raise exception
+      else:
+        continue
+
+    if not _should_retry_response(resp.status, content):
+      break
+
+  return resp, content
+
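+# Illustrative timing note (not part of the upstream module): with the
+# backoff above, retry n sleeps rand() * 2**n seconds, so with
+# num_retries=3 the waits are at most ~2, ~4 and ~8 seconds before the
+# second, third and fourth attempts.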
+
+class MediaUploadProgress(object):
+  """Status of a resumable upload."""
+
+  def __init__(self, resumable_progress, total_size):
+    """Constructor.
+
+    Args:
+      resumable_progress: int, bytes sent so far.
+      total_size: int, total bytes in complete upload, or None if the total
+        upload size isn't known ahead of time.
+    """
+    self.resumable_progress = resumable_progress
+    self.total_size = total_size
+
+  def progress(self):
+    """Percent of upload completed, as a float.
+
+    Returns:
+      the percentage complete as a float, returning 0.0 if the total size of
+      the upload is unknown.
+    """
+    if self.total_size is not None:
+      return float(self.resumable_progress) / float(self.total_size)
+    else:
+      return 0.0
+
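+# Illustrative example (not part of the upstream module): with
+# resumable_progress=524288 and total_size=1048576, progress() returns 0.5;
+# with total_size=None it returns 0.0.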
+
+class MediaDownloadProgress(object):
+  """Status of a resumable download."""
+
+  def __init__(self, resumable_progress, total_size):
+    """Constructor.
+
+    Args:
+      resumable_progress: int, bytes received so far.
+      total_size: int, total bytes in complete download.
+    """
+    self.resumable_progress = resumable_progress
+    self.total_size = total_size
+
+  def progress(self):
+    """Percent of download completed, as a float.
+
+    Returns:
+      the percentage complete as a float, returning 0.0 if the total size of
+      the download is unknown.
+    """
+    if self.total_size is not None:
+      return float(self.resumable_progress) / float(self.total_size)
+    else:
+      return 0.0
+
+
+class MediaUpload(object):
+  """Describes a media object to upload.
+
+  Base class that defines the interface of MediaUpload subclasses.
+
+  Note that subclasses of MediaUpload may allow you to control the chunksize
+  when uploading a media object. It is important to keep the size of the chunk
+  as large as possible to keep the upload efficient. Other factors may influence
+  the size of the chunk you use, particularly if you are working in an
+  environment where individual HTTP requests may have a hardcoded time limit,
+  such as under certain classes of requests under Google App Engine.
+
+  Streams are io.Base compatible objects that support seek(). Some MediaUpload
+  subclasses support using streams directly to upload data. Support for
+  streaming may be indicated by a MediaUpload sub-class and if appropriate for a
+  platform that stream will be used for uploading the media object. The support
+  for streaming is indicated by has_stream() returning True. The stream() method
+  should return an io.Base object that supports seek(). On platforms where the
+  underlying httplib module supports streaming, for example Python 2.6 and
+  later, the stream will be passed into the http library which will result in
+  less memory being used and possibly faster uploads.
+
+  If you need to upload media that can't be uploaded using any of the existing
+  MediaUpload sub-class then you can sub-class MediaUpload for your particular
+  needs.
+  """
+
+  def chunksize(self):
+    """Chunk size for resumable uploads.
+
+    Returns:
+      Chunk size in bytes.
+    """
+    raise NotImplementedError()
+
+  def mimetype(self):
+    """Mime type of the body.
+
+    Returns:
+      Mime type.
+    """
+    return 'application/octet-stream'
+
+  def size(self):
+    """Size of upload.
+
+    Returns:
+      Size of the body, or None if the size is unknown.
+    """
+    return None
+
+  def resumable(self):
+    """Whether this upload is resumable.
+
+    Returns:
+      True if resumable upload or False.
+    """
+    return False
+
+  def getbytes(self, begin, length):
+    """Get bytes from the media.
+
+    Args:
+      begin: int, offset from beginning of file.
+      length: int, number of bytes to read, starting at begin.
+
+    Returns:
+      A string of bytes read. May be shorter than length if EOF was reached
+      first.
+    """
+    raise NotImplementedError()
+
+  def has_stream(self):
+    """Does the underlying upload support a streaming interface.
+
+    Streaming means it is an io.IOBase subclass that supports seek, i.e.
+    seekable() returns True.
+
+    Returns:
+      True if the call to stream() will return an instance of a seekable io.Base
+      subclass.
+    """
+    return False
+
+  def stream(self):
+    """A stream interface to the data being uploaded.
+
+    Returns:
+      The returned value is an io.IOBase subclass that supports seek, i.e.
+      seekable() returns True.
+    """
+    raise NotImplementedError()
+
+  @util.positional(1)
+  def _to_json(self, strip=None):
+    """Utility function for creating a JSON representation of a MediaUpload.
+
+    Args:
+      strip: array, An array of names of members to not include in the JSON.
+
+    Returns:
+       string, a JSON representation of this instance, suitable to pass to
+       from_json().
+    """
+    t = type(self)
+    d = copy.copy(self.__dict__)
+    if strip is not None:
+      for member in strip:
+        del d[member]
+    d['_class'] = t.__name__
+    d['_module'] = t.__module__
+    return json.dumps(d)
+
+  def to_json(self):
+    """Create a JSON representation of an instance of MediaUpload.
+
+    Returns:
+       string, a JSON representation of this instance, suitable to pass to
+       from_json().
+    """
+    return self._to_json()
+
+  @classmethod
+  def new_from_json(cls, s):
+    """Utility class method to instantiate a MediaUpload subclass from a JSON
+    representation produced by to_json().
+
+    Args:
+      s: string, JSON from to_json().
+
+    Returns:
+      An instance of the subclass of MediaUpload that was serialized with
+      to_json().
+    """
+    data = json.loads(s)
+    # Find and call the right classmethod from_json() to restore the object.
+    module = data['_module']
+    m = __import__(module, fromlist=module.split('.')[:-1])
+    kls = getattr(m, data['_class'])
+    from_json = getattr(kls, 'from_json')
+    return from_json(s)
+
+
+class MediaIoBaseUpload(MediaUpload):
+  """A MediaUpload for a io.Base objects.
+
+  Note that the Python file object is compatible with io.Base and can be used
+  with this class also.
+
+    fh = BytesIO('...Some data to upload...')
+    media = MediaIoBaseUpload(fh, mimetype='image/png',
+      chunksize=1024*1024, resumable=True)
+    farm.animals().insert(
+        id='cow',
+        name='cow.png',
+        media_body=media).execute()
+
+  Depending on the platform you are working on, you may pass -1 as the
+  chunksize, which indicates that the entire file should be uploaded in a single
+  request. If the underlying platform supports streams, such as Python 2.6 or
+  later, then this can be very efficient as it avoids multiple connections, and
+  also avoids loading the entire file into memory before sending it. Note that
+  Google App Engine has a 5MB limit on request size, so you should never set
+  your chunksize larger than 5MB, or to -1.
+  """
+
+  @util.positional(3)
+  def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
+      resumable=False):
+    """Constructor.
+
+    Args:
+      fd: io.Base or file object, The source of the bytes to upload. MUST be
+        opened in blocking mode, do not use streams opened in non-blocking mode.
+        The given stream must be seekable, that is, it must be able to call
+        seek() on fd.
+      mimetype: string, Mime-type of the file.
+      chunksize: int, File will be uploaded in chunks of this many bytes. Only
+        used if resumable=True. Pass in a value of -1 if the file is to be
+        uploaded as a single chunk. Note that Google App Engine has a 5MB limit
+        on request size, so you should never set your chunksize larger than 5MB,
+        or to -1.
+      resumable: bool, True if this is a resumable upload. False means upload
+        in a single request.
+    """
+    super(MediaIoBaseUpload, self).__init__()
+    self._fd = fd
+    self._mimetype = mimetype
+    if not (chunksize == -1 or chunksize > 0):
+      raise InvalidChunkSizeError()
+    self._chunksize = chunksize
+    self._resumable = resumable
+
+    self._fd.seek(0, os.SEEK_END)
+    self._size = self._fd.tell()
+
+  def chunksize(self):
+    """Chunk size for resumable uploads.
+
+    Returns:
+      Chunk size in bytes.
+    """
+    return self._chunksize
+
+  def mimetype(self):
+    """Mime type of the body.
+
+    Returns:
+      Mime type.
+    """
+    return self._mimetype
+
+  def size(self):
+    """Size of upload.
+
+    Returns:
+      Size of the body, or None if the size is unknown.
+    """
+    return self._size
+
+  def resumable(self):
+    """Whether this upload is resumable.
+
+    Returns:
+      True if resumable upload or False.
+    """
+    return self._resumable
+
+  def getbytes(self, begin, length):
+    """Get bytes from the media.
+
+    Args:
+      begin: int, offset from beginning of file.
+      length: int, number of bytes to read, starting at begin.
+
+    Returns:
+      A string of bytes read. May be shorter than length if EOF was reached
+      first.
+    """
+    self._fd.seek(begin)
+    return self._fd.read(length)
+
+  def has_stream(self):
+    """Does the underlying upload support a streaming interface.
+
+    Streaming means it is an io.IOBase subclass that supports seek, i.e.
+    seekable() returns True.
+
+    Returns:
+      True if the call to stream() will return an instance of a seekable io.Base
+      subclass.
+    """
+    return True
+
+  def stream(self):
+    """A stream interface to the data being uploaded.
+
+    Returns:
+      The returned value is an io.IOBase subclass that supports seek, i.e.
+      seekable() returns True.
+    """
+    return self._fd
+
+  def to_json(self):
+    """This upload type is not serializable."""
+    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
+
+
+class MediaFileUpload(MediaIoBaseUpload):
+  """A MediaUpload for a file.
+
+  Construct a MediaFileUpload and pass as the media_body parameter of the
+  method. For example, if we had a service that allowed uploading images:
+
+
+    media = MediaFileUpload('cow.png', mimetype='image/png',
+      chunksize=1024*1024, resumable=True)
+    farm.animals().insert(
+        id='cow',
+        name='cow.png',
+        media_body=media).execute()
+
+  Depending on the platform you are working on, you may pass -1 as the
+  chunksize, which indicates that the entire file should be uploaded in a single
+  request. If the underlying platform supports streams, such as Python 2.6 or
+  later, then this can be very efficient as it avoids multiple connections, and
+  also avoids loading the entire file into memory before sending it. Note that
+  Google App Engine has a 5MB limit on request size, so you should never set
+  your chunksize larger than 5MB, or to -1.
+  """
+
+  @util.positional(2)
+  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
+               resumable=False):
+    """Constructor.
+
+    Args:
+      filename: string, Name of the file.
+      mimetype: string, Mime-type of the file. If None then a mime-type will be
+        guessed from the file extension.
+      chunksize: int, File will be uploaded in chunks of this many bytes. Only
+        used if resumable=True. Pass in a value of -1 if the file is to be
+        uploaded in a single chunk. Note that Google App Engine has a 5MB limit
+        on request size, so you should never set your chunksize larger than 5MB,
+        or to -1.
+      resumable: bool, True if this is a resumable upload. False means upload
+        in a single request.
+    """
+    self._filename = filename
+    fd = open(self._filename, 'rb')
+    if mimetype is None:
+      # No mimetype provided, make a guess.
+      mimetype, _ = mimetypes.guess_type(filename)
+      if mimetype is None:
+        # Guess failed, use octet-stream.
+        mimetype = 'application/octet-stream'
+    super(MediaFileUpload, self).__init__(fd, mimetype, chunksize=chunksize,
+                                          resumable=resumable)
+
+  def to_json(self):
+    """Creating a JSON representation of an instance of MediaFileUpload.
+
+    Returns:
+       string, a JSON representation of this instance, suitable to pass to
+       from_json().
+    """
+    return self._to_json(strip=['_fd'])
+
+  @staticmethod
+  def from_json(s):
+    d = json.loads(s)
+    return MediaFileUpload(d['_filename'], mimetype=d['_mimetype'],
+                           chunksize=d['_chunksize'], resumable=d['_resumable'])
+
+
+class MediaInMemoryUpload(MediaIoBaseUpload):
+  """MediaUpload for a chunk of bytes.
+
+  DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
+  the stream.
+  """
+
+  @util.positional(2)
+  def __init__(self, body, mimetype='application/octet-stream',
+               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
+    """Create a new MediaInMemoryUpload.
+
+    DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
+    the stream.
+
+    Args:
+      body: string, Bytes of body content.
+      mimetype: string, Mime-type of the file or default of
+        'application/octet-stream'.
+      chunksize: int, File will be uploaded in chunks of this many bytes. Only
+        used if resumable=True.
+      resumable: bool, True if this is a resumable upload. False means upload
+        in a single request.
+    """
+    fd = BytesIO(body)
+    super(MediaInMemoryUpload, self).__init__(fd, mimetype, chunksize=chunksize,
+                                              resumable=resumable)
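+
+  # A minimal sketch of the non-deprecated equivalent, assuming the payload is
+  # already in memory as bytes:
+  #
+  #   fh = BytesIO(b'...body bytes...')
+  #   media = MediaIoBaseUpload(fh, mimetype='application/octet-stream',
+  #                             resumable=False)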
+
+
+class MediaIoBaseDownload(object):
+  """Download media resources.
+
+  Note that the Python file object is compatible with io.IOBase and can be used
+  with this class also.
+
+
+  Example:
+    request = farms.animals().get_media(id='cow')
+    fh = io.FileIO('cow.png', mode='wb')
+    downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)
+
+    done = False
+    while done is False:
+      status, done = downloader.next_chunk()
+      if status:
+        print "Download %d%%." % int(status.progress() * 100)
+    print "Download Complete!"
+  """
+
+  @util.positional(3)
+  def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
+    """Constructor.
+
+    Args:
+      fd: io.Base or file object, The stream in which to write the downloaded
+        bytes.
+      request: googleapiclient.http.HttpRequest, the media request to perform in
+        chunks.
+      chunksize: int, File will be downloaded in chunks of this many bytes.
+    """
+    self._fd = fd
+    self._request = request
+    self._uri = request.uri
+    self._chunksize = chunksize
+    self._progress = 0
+    self._total_size = None
+    self._done = False
+
+    # Stubs for testing.
+    self._sleep = time.sleep
+    self._rand = random.random
+
+  @util.positional(1)
+  def next_chunk(self, num_retries=0):
+    """Get the next chunk of the download.
+
+    Args:
+      num_retries: Integer, number of times to retry with randomized
+            exponential backoff. If all retries fail, the raised HttpError
+            represents the last request. If zero (default), we attempt the
+            request only once.
+
+    Returns:
+      (status, done): (MediaDownloadStatus, boolean)
+         The value of 'done' will be True when the media has been fully
+         downloaded.
+
+    Raises:
+      googleapiclient.errors.HttpError if the response was not a 2xx.
+      httplib2.HttpLib2Error if a transport error has occurred.
+    """
+    headers = {
+        'range': 'bytes=%d-%d' % (
+            self._progress, self._progress + self._chunksize)
+        }
+    http = self._request.http
+
+    resp, content = _retry_request(
+        http, num_retries, 'media download', self._sleep, self._rand, self._uri,
+        'GET', headers=headers)
+
+    if resp.status in [200, 206]:
+      if 'content-location' in resp and resp['content-location'] != self._uri:
+        self._uri = resp['content-location']
+      self._progress += len(content)
+      self._fd.write(content)
+
+      if 'content-range' in resp:
+        content_range = resp['content-range']
+        length = content_range.rsplit('/', 1)[1]
+        self._total_size = int(length)
+      elif 'content-length' in resp:
+        self._total_size = int(resp['content-length'])
+
+      if self._progress == self._total_size:
+        self._done = True
+      return MediaDownloadProgress(self._progress, self._total_size), self._done
+    else:
+      raise HttpError(resp, content, uri=self._uri)
+
+
+class _StreamSlice(object):
+  """Truncated stream.
+
+  Takes a stream and presents a stream that is a slice of the original stream.
+  This is used when uploading media in chunks. In later versions of Python a
+  stream can be passed to httplib in place of the string of data to send. The
+  problem is that httplib just blindly reads to the end of the stream. This
+  wrapper presents a virtual stream that only reads to the end of the chunk.
+  """
+
+  def __init__(self, stream, begin, chunksize):
+    """Constructor.
+
+    Args:
+      stream: (io.Base, file object), the stream to wrap.
+      begin: int, the seek position the chunk begins at.
+      chunksize: int, the size of the chunk.
+    """
+    self._stream = stream
+    self._begin = begin
+    self._chunksize = chunksize
+    self._stream.seek(begin)
+
+  def read(self, n=-1):
+    """Read n bytes.
+
+    Args:
+      n: int, the number of bytes to read.
+
+    Returns:
+      A string of length 'n', or less if EOF is reached.
+    """
+    # The data left available to read sits in [cur, end)
+    cur = self._stream.tell()
+    end = self._begin + self._chunksize
+    if n == -1 or cur + n > end:
+      n = end - cur
+    return self._stream.read(n)
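+
+  # A minimal sketch (the file name is hypothetical): expose bytes 100-199 of
+  # a larger stream as a stream of their own.
+  #
+  #   fd = open('payload.bin', 'rb')
+  #   part = _StreamSlice(fd, 100, 100)
+  #   chunk = part.read()  # at most 100 bytes, starting at offset 100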
+
+
+class HttpRequest(object):
+  """Encapsulates a single HTTP request."""
+
+  @util.positional(4)
+  def __init__(self, http, postproc, uri,
+               method='GET',
+               body=None,
+               headers=None,
+               methodId=None,
+               resumable=None):
+    """Constructor for an HttpRequest.
+
+    Args:
+      http: httplib2.Http, the transport object to use to make a request
+      postproc: callable, called on the HTTP response and content to transform
+                it into a data object before returning, or raising an exception
+                on an error.
+      uri: string, the absolute URI to send the request to
+      method: string, the HTTP method to use
+      body: string, the request body of the HTTP request.
+      headers: dict, the HTTP request headers
+      methodId: string, a unique identifier for the API method being called.
+      resumable: MediaUpload, None if this is not a resumable request.
+    """
+    self.uri = uri
+    self.method = method
+    self.body = body
+    self.headers = headers or {}
+    self.methodId = methodId
+    self.http = http
+    self.postproc = postproc
+    self.resumable = resumable
+    self.response_callbacks = []
+    self._in_error_state = False
+
+    # Pull the multipart boundary out of the content-type header.
+    major, minor, params = mimeparse.parse_mime_type(
+        self.headers.get('content-type', 'application/json'))
+
+    # The size of the non-media part of the request.
+    self.body_size = len(self.body or '')
+
+    # The resumable URI to send chunks to.
+    self.resumable_uri = None
+
+    # The bytes that have been uploaded.
+    self.resumable_progress = 0
+
+    # Stubs for testing.
+    self._rand = random.random
+    self._sleep = time.sleep
+
+  @util.positional(1)
+  def execute(self, http=None, num_retries=0):
+    """Execute the request.
+
+    Args:
+      http: httplib2.Http, an http object to be used in place of the
+            one the HttpRequest request object was constructed with.
+      num_retries: Integer, number of times to retry with randomized
+            exponential backoff. If all retries fail, the raised HttpError
+            represents the last request. If zero (default), we attempt the
+            request only once.
+
+    Returns:
+      A deserialized object model of the response body as determined
+      by the postproc.
+
+    Raises:
+      googleapiclient.errors.HttpError if the response was not a 2xx.
+      httplib2.HttpLib2Error if a transport error has occurred.
+    """
+    if http is None:
+      http = self.http
+
+    if self.resumable:
+      body = None
+      while body is None:
+        _, body = self.next_chunk(http=http, num_retries=num_retries)
+      return body
+
+    # Non-resumable case.
+
+    if 'content-length' not in self.headers:
+      self.headers['content-length'] = str(self.body_size)
+    # If the request URI is too long then turn it into a POST request.
+    if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
+      self.method = 'POST'
+      self.headers['x-http-method-override'] = 'GET'
+      self.headers['content-type'] = 'application/x-www-form-urlencoded'
+      parsed = urlparse(self.uri)
+      self.uri = urlunparse(
+          (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
+           None)
+          )
+      self.body = parsed.query
+      self.headers['content-length'] = str(len(self.body))
+
+    # Handle retries for server-side errors.
+    resp, content = _retry_request(
+          http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
+          method=str(self.method), body=self.body, headers=self.headers)
+
+    for callback in self.response_callbacks:
+      callback(resp)
+    if resp.status >= 300:
+      raise HttpError(resp, content, uri=self.uri)
+    return self.postproc(resp, content)
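+
+  # A minimal sketch: 'request' stands for any HttpRequest produced by the
+  # generated API surface; HttpError is raised for non-2xx responses.
+  #
+  #   try:
+  #     result = request.execute(num_retries=3)
+  #   except HttpError as e:
+  #     LOGGER.warning('request failed: %s', e)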
+
+  @util.positional(2)
+  def add_response_callback(self, cb):
+    """Add a callback to be called on receiving the response headers.
+
+    Args:
+      cb: Callback to be called on receiving the response headers, of signature:
+
+      def cb(resp):
+        # Where resp is an instance of httplib2.Response
+    """
+    self.response_callbacks.append(cb)
+
+  @util.positional(1)
+  def next_chunk(self, http=None, num_retries=0):
+    """Execute the next step of a resumable upload.
+
+    Can only be used if the method being executed supports media uploads and
+    the MediaUpload object passed in was flagged as using resumable upload.
+
+    Example:
+
+      media = MediaFileUpload('cow.png', mimetype='image/png',
+                              chunksize=1000, resumable=True)
+      request = farm.animals().insert(
+          id='cow',
+          name='cow.png',
+          media_body=media)
+
+      response = None
+      while response is None:
+        status, response = request.next_chunk()
+        if status:
+          print "Upload %d%% complete." % int(status.progress() * 100)
+
+
+    Args:
+      http: httplib2.Http, an http object to be used in place of the
+            one the HttpRequest request object was constructed with.
+      num_retries: Integer, number of times to retry with randomized
+            exponential backoff. If all retries fail, the raised HttpError
+            represents the last request. If zero (default), we attempt the
+            request only once.
+
+    Returns:
+      (status, body): (ResumableMediaStatus, object)
+         The body will be None until the resumable media is fully uploaded.
+
+    Raises:
+      googleapiclient.errors.HttpError if the response was not a 2xx.
+      httplib2.HttpLib2Error if a transport error has occurred.
+    """
+    if http is None:
+      http = self.http
+
+    if self.resumable.size() is None:
+      size = '*'
+    else:
+      size = str(self.resumable.size())
+
+    if self.resumable_uri is None:
+      start_headers = copy.copy(self.headers)
+      start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
+      if size != '*':
+        start_headers['X-Upload-Content-Length'] = size
+      start_headers['content-length'] = str(self.body_size)
+
+      resp, content = _retry_request(
+          http, num_retries, 'resumable URI request', self._sleep, self._rand,
+          self.uri, method=self.method, body=self.body, headers=start_headers)
+
+      if resp.status == 200 and 'location' in resp:
+        self.resumable_uri = resp['location']
+      else:
+        raise ResumableUploadError(resp, content)
+    elif self._in_error_state:
+      # If we are in an error state then query the server for current state of
+      # the upload by sending an empty PUT and reading the 'range' header in
+      # the response.
+      headers = {
+          'Content-Range': 'bytes */%s' % size,
+          'content-length': '0'
+          }
+      resp, content = http.request(self.resumable_uri, 'PUT',
+                                   headers=headers)
+      status, body = self._process_response(resp, content)
+      if body:
+        # The upload was complete.
+        return (status, body)
+
+    if self.resumable.has_stream():
+      data = self.resumable.stream()
+      if self.resumable.chunksize() == -1:
+        data.seek(self.resumable_progress)
+        chunk_end = self.resumable.size() - self.resumable_progress - 1
+      else:
+        # Doing chunking with a stream, so wrap a slice of the stream.
+        data = _StreamSlice(data, self.resumable_progress,
+                            self.resumable.chunksize())
+        chunk_end = min(
+            self.resumable_progress + self.resumable.chunksize() - 1,
+            self.resumable.size() - 1)
+    else:
+      data = self.resumable.getbytes(
+          self.resumable_progress, self.resumable.chunksize())
+
+      # A short read implies that we are at EOF, so finish the upload.
+      if len(data) < self.resumable.chunksize():
+        size = str(self.resumable_progress + len(data))
+
+      chunk_end = self.resumable_progress + len(data) - 1
+
+    headers = {
+        'Content-Range': 'bytes %d-%d/%s' % (
+            self.resumable_progress, chunk_end, size),
+        # Must set the content-length header here because httplib can't
+        # calculate the size when working with _StreamSlice.
+        'Content-Length': str(chunk_end - self.resumable_progress + 1)
+        }
+
+    for retry_num in range(num_retries + 1):
+      if retry_num > 0:
+        self._sleep(self._rand() * 2**retry_num)
+        LOGGER.warning(
+            'Retry #%d for media upload: %s %s, following status: %d'
+            % (retry_num, self.method, self.uri, resp.status))
+
+      try:
+        resp, content = http.request(self.resumable_uri, method='PUT',
+                                     body=data,
+                                     headers=headers)
+      except:
+        self._in_error_state = True
+        raise
+      if not _should_retry_response(resp.status, content):
+        break
+
+    return self._process_response(resp, content)
+
+  def _process_response(self, resp, content):
+    """Process the response from a single chunk upload.
+
+    Args:
+      resp: httplib2.Response, the response object.
+      content: string, the content of the response.
+
+    Returns:
+      (status, body): (ResumableMediaStatus, object)
+         The body will be None until the resumable media is fully uploaded.
+
+    Raises:
+      googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
+    """
+    if resp.status in [200, 201]:
+      self._in_error_state = False
+      return None, self.postproc(resp, content)
+    elif resp.status == 308:
+      self._in_error_state = False
+      # A "308 Resume Incomplete" indicates we are not done.
+      self.resumable_progress = int(resp['range'].split('-')[1]) + 1
+      if 'location' in resp:
+        self.resumable_uri = resp['location']
+    else:
+      self._in_error_state = True
+      raise HttpError(resp, content, uri=self.uri)
+
+    return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
+            None)
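+
+  # A worked example of the 308 bookkeeping above: a response carrying
+  #   resp['range'] == 'bytes=0-49999'
+  # resumes the next chunk at byte offset 50000.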
+
+  def to_json(self):
+    """Returns a JSON representation of the HttpRequest."""
+    d = copy.copy(self.__dict__)
+    if d['resumable'] is not None:
+      d['resumable'] = self.resumable.to_json()
+    del d['http']
+    del d['postproc']
+    del d['_sleep']
+    del d['_rand']
+
+    return json.dumps(d)
+
+  @staticmethod
+  def from_json(s, http, postproc):
+    """Returns an HttpRequest populated with info from a JSON object."""
+    d = json.loads(s)
+    if d['resumable'] is not None:
+      d['resumable'] = MediaUpload.new_from_json(d['resumable'])
+    return HttpRequest(
+        http,
+        postproc,
+        uri=d['uri'],
+        method=d['method'],
+        body=d['body'],
+        headers=d['headers'],
+        methodId=d['methodId'],
+        resumable=d['resumable'])
+
+
+class BatchHttpRequest(object):
+  """Batches multiple HttpRequest objects into a single HTTP request.
+
+  Example:
+    from googleapiclient.http import BatchHttpRequest
+
+    def list_animals(request_id, response, exception):
+      \"\"\"Do something with the animals list response.\"\"\"
+      if exception is not None:
+        # Do something with the exception.
+        pass
+      else:
+        # Do something with the response.
+        pass
+
+    def list_farmers(request_id, response, exception):
+      \"\"\"Do something with the farmers list response.\"\"\"
+      if exception is not None:
+        # Do something with the exception.
+        pass
+      else:
+        # Do something with the response.
+        pass
+
+    service = build('farm', 'v2')
+
+    batch = BatchHttpRequest()
+
+    batch.add(service.animals().list(), list_animals)
+    batch.add(service.farmers().list(), list_farmers)
+    batch.execute(http=http)
+  """
+
+  @util.positional(1)
+  def __init__(self, callback=None, batch_uri=None):
+    """Constructor for a BatchHttpRequest.
+
+    Args:
+      callback: callable, A callback to be called for each response, of the
+        form callback(id, response, exception). The first parameter is the
+        request id, and the second is the deserialized response object. The
+        third is an googleapiclient.errors.HttpError exception object if an HTTP error
+        occurred while processing the request, or None if no error occurred.
+      batch_uri: string, URI to send batch requests to.
+    """
+    if batch_uri is None:
+      batch_uri = 'https://www.googleapis.com/batch'
+    self._batch_uri = batch_uri
+
+    # Global callback to be called for each individual response in the batch.
+    self._callback = callback
+
+    # A map from id to request.
+    self._requests = {}
+
+    # A map from id to callback.
+    self._callbacks = {}
+
+    # List of request ids, in the order in which they were added.
+    self._order = []
+
+    # The last auto generated id.
+    self._last_auto_id = 0
+
+    # Unique ID on which to base the Content-ID headers.
+    self._base_id = None
+
+    # A map from request id to (httplib2.Response, content) response pairs
+    self._responses = {}
+
+    # A map of id(Credentials) that have been refreshed.
+    self._refreshed_credentials = {}
+
+  def _refresh_and_apply_credentials(self, request, http):
+    """Refresh the credentials and apply to the request.
+
+    Args:
+      request: HttpRequest, the request.
+      http: httplib2.Http, the global http object for the batch.
+    """
+    # Refresh the credentials, but only once per distinct credentials object.
+    # If the request has no http of its own, refresh against the http object
+    # passed in via execute().
+    creds = None
+    if request.http is not None and hasattr(request.http.request,
+        'credentials'):
+      creds = request.http.request.credentials
+    elif http is not None and hasattr(http.request, 'credentials'):
+      creds = http.request.credentials
+    if creds is not None:
+      if id(creds) not in self._refreshed_credentials:
+        creds.refresh(http)
+        self._refreshed_credentials[id(creds)] = 1
+
+    # Only apply the credentials if we are using the http object passed in,
+    # otherwise apply() will get called during _serialize_request().
+    if request.http is None or not hasattr(request.http.request,
+        'credentials'):
+      creds.apply(request.headers)
+
+  def _id_to_header(self, id_):
+    """Convert an id to a Content-ID header value.
+
+    Args:
+      id_: string, identifier of individual request.
+
+    Returns:
+      A Content-ID header with the id_ encoded into it. A UUID is prepended to
+      the value because Content-ID headers are supposed to be universally
+      unique.
+    """
+    if self._base_id is None:
+      self._base_id = uuid.uuid4()
+
+    return '<%s+%s>' % (self._base_id, quote(id_))
+
+  def _header_to_id(self, header):
+    """Convert a Content-ID header value to an id.
+
+    Presumes the Content-ID header conforms to the format that _id_to_header()
+    returns.
+
+    Args:
+      header: string, Content-ID header value.
+
+    Returns:
+      The extracted id value.
+
+    Raises:
+      BatchError if the header is not in the expected format.
+    """
+    if header[0] != '<' or header[-1] != '>':
+      raise BatchError("Invalid value for Content-ID: %s" % header)
+    if '+' not in header:
+      raise BatchError("Invalid value for Content-ID: %s" % header)
+    base, id_ = header[1:-1].rsplit('+', 1)
+
+    return unquote(id_)
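+
+  # A minimal round trip between the two helpers above (the UUID portion shown
+  # is made up):
+  #
+  #   header = batch._id_to_header('42')   # e.g. '<8a6f...+42>'
+  #   assert batch._header_to_id(header) == '42'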
+
+  def _serialize_request(self, request):
+    """Convert an HttpRequest object into a string.
+
+    Args:
+      request: HttpRequest, the request to serialize.
+
+    Returns:
+      The request as a string in application/http format.
+    """
+    # Construct status line
+    parsed = urlparse(request.uri)
+    request_line = urlunparse(
+        ('', '', parsed.path, parsed.params, parsed.query, '')
+        )
+    status_line = request.method + ' ' + request_line + ' HTTP/1.1\n'
+    major, minor = request.headers.get('content-type', 'application/json').split('/')
+    msg = MIMENonMultipart(major, minor)
+    headers = request.headers.copy()
+
+    if request.http is not None and hasattr(request.http.request,
+        'credentials'):
+      request.http.request.credentials.apply(headers)
+
+    # MIMENonMultipart adds its own Content-Type header.
+    if 'content-type' in headers:
+      del headers['content-type']
+
+    for key, value in six.iteritems(headers):
+      msg[key] = value
+    msg['Host'] = parsed.netloc
+    msg.set_unixfrom(None)
+
+    if request.body is not None:
+      msg.set_payload(request.body)
+      msg['content-length'] = str(len(request.body))
+
+    # Serialize the mime message.
+    fp = StringIO()
+    # maxheaderlen=0 means don't line wrap headers.
+    g = Generator(fp, maxheaderlen=0)
+    g.flatten(msg, unixfrom=False)
+    body = fp.getvalue()
+
+    return status_line + body
+
+  def _deserialize_response(self, payload):
+    """Convert string into httplib2 response and content.
+
+    Args:
+      payload: string, headers and body as a string.
+
+    Returns:
+      A pair (resp, content), such as would be returned from httplib2.request.
+    """
+    # Strip off the status line
+    status_line, payload = payload.split('\n', 1)
+    protocol, status, reason = status_line.split(' ', 2)
+
+    # Parse the rest of the response
+    parser = FeedParser()
+    parser.feed(payload)
+    msg = parser.close()
+    msg['status'] = status
+
+    # Create httplib2.Response from the parsed headers.
+    resp = httplib2.Response(msg)
+    resp.reason = reason
+    resp.version = int(protocol.split('/', 1)[1].replace('.', ''))
+
+    content = payload.split('\r\n\r\n', 1)[1]
+
+    return resp, content
+
+  def _new_id(self):
+    """Create a new id.
+
+    Auto incrementing number that avoids conflicts with ids already used.
+
+    Returns:
+       string, a new unique id.
+    """
+    self._last_auto_id += 1
+    while str(self._last_auto_id) in self._requests:
+      self._last_auto_id += 1
+    return str(self._last_auto_id)
+
+  @util.positional(2)
+  def add(self, request, callback=None, request_id=None):
+    """Add a new request.
+
+    Every callback added will be paired with a unique id, the request_id. That
+    unique id will be passed back to the callback when the response comes back
+    from the server. The default behavior is to have the library generate its
+    own unique id. If the caller passes in a request_id then they must ensure
+    uniqueness for each request_id, and if a duplicate is used an exception is
+    raised. Callers should either supply all request_ids or never supply a
+    request id, to avoid such an error.
+
+    Args:
+      request: HttpRequest, Request to add to the batch.
+      callback: callable, A callback to be called for this response, of the
+        form callback(id, response, exception). The first parameter is the
+        request id, and the second is the deserialized response object. The
+        third is an googleapiclient.errors.HttpError exception object if an HTTP error
+        occurred while processing the request, or None if no errors occurred.
+      request_id: string, A unique id for the request. The id will be passed to
+        the callback with the response.
+
+    Returns:
+      None
+
+    Raises:
+      BatchError if a media request is added to a batch.
+      KeyError if the request_id is not unique.
+    """
+    if request_id is None:
+      request_id = self._new_id()
+    if request.resumable is not None:
+      raise BatchError("Media requests cannot be used in a batch request.")
+    if request_id in self._requests:
+      raise KeyError("A request with this ID already exists: %s" % request_id)
+    self._requests[request_id] = request
+    self._callbacks[request_id] = callback
+    self._order.append(request_id)
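+
+  # A minimal sketch (service and handler names are hypothetical) of supplying
+  # an explicit request_id together with a per-request callback:
+  #
+  #   batch.add(service.animals().list(), callback=handle_animals,
+  #             request_id='animals-1')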
+
+  def _execute(self, http, order, requests):
+    """Serialize batch request, send to server, process response.
+
+    Args:
+      http: httplib2.Http, an http object to be used to make the request with.
+      order: list, list of request ids in the order they were added to the
+        batch.
+      requests: dict, mapping of request ids to the request objects to send.
+
+    Raises:
+      httplib2.HttpLib2Error if a transport error has occurred.
+      googleapiclient.errors.BatchError if the response is the wrong format.
+    """
+    message = MIMEMultipart('mixed')
+    # Message should not write out its own headers.
+    setattr(message, '_write_headers', lambda self: None)
+
+    # Add all the individual requests.
+    for request_id in order:
+      request = requests[request_id]
+
+      msg = MIMENonMultipart('application', 'http')
+      msg['Content-Transfer-Encoding'] = 'binary'
+      msg['Content-ID'] = self._id_to_header(request_id)
+
+      body = self._serialize_request(request)
+      msg.set_payload(body)
+      message.attach(msg)
+
+    # encode the body: note that we can't use `as_string`, because
+    # it plays games with `From ` lines.
+    fp = StringIO()
+    g = Generator(fp, mangle_from_=False)
+    g.flatten(message, unixfrom=False)
+    body = fp.getvalue()
+
+    headers = {}
+    headers['content-type'] = ('multipart/mixed; '
+                               'boundary="%s"') % message.get_boundary()
+
+    resp, content = http.request(self._batch_uri, method='POST', body=body,
+                                 headers=headers)
+
+    if resp.status >= 300:
+      raise HttpError(resp, content, uri=self._batch_uri)
+
+    # Prepend with a content-type header so FeedParser can handle it.
+    header = 'content-type: %s\r\n\r\n' % resp['content-type']
+    # PY3's FeedParser only accepts unicode. So we should decode content
+    # here, and encode each payload again.
+    if six.PY3:
+      content = content.decode('utf-8')
+    for_parser = header + content
+
+    parser = FeedParser()
+    parser.feed(for_parser)
+    mime_response = parser.close()
+
+    if not mime_response.is_multipart():
+      raise BatchError("Response not in multipart/mixed format.", resp=resp,
+                       content=content)
+
+    for part in mime_response.get_payload():
+      request_id = self._header_to_id(part['Content-ID'])
+      response, content = self._deserialize_response(part.get_payload())
+      # We encode content here to emulate normal http response.
+      if isinstance(content, six.text_type):
+        content = content.encode('utf-8')
+      self._responses[request_id] = (response, content)
+
+  @util.positional(1)
+  def execute(self, http=None):
+    """Execute all the requests as a single batched HTTP request.
+
+    Args:
+      http: httplib2.Http, an http object to be used in place of the one the
+        HttpRequest request object was constructed with. If one isn't supplied
+        then use a http object from the requests in this batch.
+
+    Returns:
+      None
+
+    Raises:
+      httplib2.HttpLib2Error if a transport error has occurred.
+      googleapiclient.errors.BatchError if the response is the wrong format.
+    """
+    # If we have no requests return
+    if len(self._order) == 0:
+      return None
+
+    # If http is not supplied use the first valid one given in the requests.
+    if http is None:
+      for request_id in self._order:
+        request = self._requests[request_id]
+        if request is not None:
+          http = request.http
+          break
+
+    if http is None:
+      raise ValueError("Missing a valid http object.")
+
+    # Special case for OAuth2Credentials-style objects which have not yet been
+    # refreshed with an initial access_token.
+    if getattr(http.request, 'credentials', None) is not None:
+      creds = http.request.credentials
+      if not getattr(creds, 'access_token', None):
+        LOGGER.info('Attempting refresh to obtain initial access_token')
+        creds.refresh(http)
+
+    self._execute(http, self._order, self._requests)
+
+    # Loop over all the requests and check for 401s. For each 401 request the
+    # credentials should be refreshed and then sent again in a separate batch.
+    redo_requests = {}
+    redo_order = []
+
+    for request_id in self._order:
+      resp, content = self._responses[request_id]
+      if resp['status'] == '401':
+        redo_order.append(request_id)
+        request = self._requests[request_id]
+        self._refresh_and_apply_credentials(request, http)
+        redo_requests[request_id] = request
+
+    if redo_requests:
+      self._execute(http, redo_order, redo_requests)
+
+    # Process all the responses, converting any non-2xx response into an
+    # HttpError that is passed to the per-request and global callbacks as the
+    # exception parameter.
+
+    for request_id in self._order:
+      resp, content = self._responses[request_id]
+
+      request = self._requests[request_id]
+      callback = self._callbacks[request_id]
+
+      response = None
+      exception = None
+      try:
+        if resp.status >= 300:
+          raise HttpError(resp, content, uri=request.uri)
+        response = request.postproc(resp, content)
+      except HttpError as e:
+        exception = e
+
+      if callback is not None:
+        callback(request_id, response, exception)
+      if self._callback is not None:
+        self._callback(request_id, response, exception)
+
+
+class HttpRequestMock(object):
+  """Mock of HttpRequest.
+
+  Do not construct directly, instead use RequestMockBuilder.
+  """
+
+  def __init__(self, resp, content, postproc):
+    """Constructor for HttpRequestMock
+
+    Args:
+      resp: httplib2.Response, the response to emulate coming from the request
+      content: string, the response body
+      postproc: callable, the post processing function usually supplied by
+                the model class. See model.JsonModel.response() as an example.
+    """
+    self.resp = resp
+    self.content = content
+    self.postproc = postproc
+    if resp is None:
+      self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
+    if 'reason' in self.resp:
+      self.resp.reason = self.resp['reason']
+
+  def execute(self, http=None):
+    """Execute the request.
+
+    Same behavior as HttpRequest.execute(), but the response is
+    mocked and not really from an HTTP request/response.
+    """
+    return self.postproc(self.resp, self.content)
+
+
+class RequestMockBuilder(object):
+  """A simple mock of HttpRequest
+
+    Pass in a dictionary to the constructor that maps request methodIds to
+    tuples of (httplib2.Response, content, opt_expected_body) that should be
+    returned when that method is called. None may also be passed in for the
+    httplib2.Response, in which case a 200 OK response will be generated.
+    If an opt_expected_body (str or dict) is provided, it will be compared to
+    the body and UnexpectedBodyError will be raised on inequality.
+
+    Example:
+      response = '{"data": {"id": "tag:google.c...'
+      requestBuilder = RequestMockBuilder(
+        {
+          'plus.activities.get': (None, response),
+        }
+      )
+      googleapiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)
+
+    Methods that you do not supply a response for will return a
+    200 OK with an empty string as the response content or raise an exception
+    if check_unexpected is set to True. The methodId is taken from the rpcName
+    in the discovery document.
+
+    For more details see the project wiki.
+  """
+
+  def __init__(self, responses, check_unexpected=False):
+    """Constructor for RequestMockBuilder
+
+    The constructed object should be a callable object
+    that can replace the class HttpRequest.
+
+    responses - A dictionary that maps methodIds into tuples
+                of (httplib2.Response, content). The methodId
+                comes from the 'rpcName' field in the discovery
+                document.
+    check_unexpected - A boolean setting whether or not UnexpectedMethodError
+                       should be raised on unsupplied method.
+    """
+    self.responses = responses
+    self.check_unexpected = check_unexpected
+
+  def __call__(self, http, postproc, uri, method='GET', body=None,
+               headers=None, methodId=None, resumable=None):
+    """Implements the callable interface that discovery.build() expects
+    of requestBuilder, which is to build an object compatible with
+    HttpRequest.execute(). See that method for the description of the
+    parameters and the expected response.
+    """
+    if methodId in self.responses:
+      response = self.responses[methodId]
+      resp, content = response[:2]
+      if len(response) > 2:
+        # Test the body against the supplied expected_body.
+        expected_body = response[2]
+        if bool(expected_body) != bool(body):
+          # Not expecting a body and provided one
+          # or expecting a body and not provided one.
+          raise UnexpectedBodyError(expected_body, body)
+        if isinstance(expected_body, str):
+          expected_body = json.loads(expected_body)
+        body = json.loads(body)
+        if body != expected_body:
+          raise UnexpectedBodyError(expected_body, body)
+      return HttpRequestMock(resp, content, postproc)
+    elif self.check_unexpected:
+      raise UnexpectedMethodError(methodId=methodId)
+    else:
+      model = JsonModel(False)
+      return HttpRequestMock(None, '{}', model.response)
+
+
+class HttpMock(object):
+  """Mock of httplib2.Http"""
+
+  def __init__(self, filename=None, headers=None):
+    """
+    Args:
+      filename: string, absolute filename to read response from
+      headers: dict, header to return with response
+    """
+    if headers is None:
+      headers = {'status': '200'}
+    if filename:
+      f = open(filename, 'rb')
+      self.data = f.read()
+      f.close()
+    else:
+      self.data = None
+    self.response_headers = headers
+    self.headers = None
+    self.uri = None
+    self.method = None
+    self.body = None
+
+
+  def request(self, uri,
+              method='GET',
+              body=None,
+              headers=None,
+              redirections=1,
+              connection_type=None):
+    self.uri = uri
+    self.method = method
+    self.body = body
+    self.headers = headers
+    return httplib2.Response(self.response_headers), self.data
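+
+  # A minimal sketch (the discovery document file name is hypothetical):
+  # HttpMock is typically handed to googleapiclient.discovery.build() in tests
+  # so no network traffic is generated.
+  #
+  #   http = HttpMock('books-discovery.json', {'status': '200'})
+  #   service = build('books', 'v1', http=http)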
+
+
+class HttpMockSequence(object):
+  """Mock of httplib2.Http
+
+  Mocks a sequence of calls to request returning different responses for each
+  call. Create an instance initialized with the desired response headers
+  and content and then use as if an httplib2.Http instance.
+
+    http = HttpMockSequence([
+      ({'status': '401'}, ''),
+      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
+      ({'status': '200'}, 'echo_request_headers'),
+      ])
+    resp, content = http.request("http://examples.com")
+
+  There are special values you can pass in for content to trigger
+  behaviours that are helpful in testing.
+
+  'echo_request_headers' means return the request headers in the response body
+  'echo_request_headers_as_json' means return the request headers, encoded as
+     JSON, in the response body
+  'echo_request_body' means return the request body in the response body
+  'echo_request_uri' means return the request uri in the response body
+  """
+
+  def __init__(self, iterable):
+    """
+    Args:
+      iterable: iterable, a sequence of pairs of (headers, body)
+    """
+    self._iterable = iterable
+    self.follow_redirects = True
+
+  def request(self, uri,
+              method='GET',
+              body=None,
+              headers=None,
+              redirections=1,
+              connection_type=None):
+    resp, content = self._iterable.pop(0)
+    if content == 'echo_request_headers':
+      content = headers
+    elif content == 'echo_request_headers_as_json':
+      content = json.dumps(headers)
+    elif content == 'echo_request_body':
+      if hasattr(body, 'read'):
+        content = body.read()
+      else:
+        content = body
+    elif content == 'echo_request_uri':
+      content = uri
+    if isinstance(content, six.text_type):
+      content = content.encode('utf-8')
+    return httplib2.Response(resp), content
+
+
+def set_user_agent(http, user_agent):
+  """Set the user-agent on every request.
+
+  Args:
+     http - An instance of httplib2.Http
+         or something that acts like it.
+     user_agent: string, the value for the user-agent header.
+
+  Returns:
+     A modified instance of http that was passed in.
+
+  Example:
+
+    h = httplib2.Http()
+    h = set_user_agent(h, "my-app-name/6.0")
+
+  Most of the time the user-agent will be set while doing auth; this is for the
+  rare cases where you are accessing an unauthenticated endpoint.
+  """
+  request_orig = http.request
+
+  # The closure that will replace 'httplib2.Http.request'.
+  def new_request(uri, method='GET', body=None, headers=None,
+                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
+                  connection_type=None):
+    """Modify the request headers to add the user-agent."""
+    if headers is None:
+      headers = {}
+    if 'user-agent' in headers:
+      headers['user-agent'] = user_agent + ' ' + headers['user-agent']
+    else:
+      headers['user-agent'] = user_agent
+    resp, content = request_orig(uri, method, body, headers,
+                        redirections, connection_type)
+    return resp, content
+
+  http.request = new_request
+  return http
+
+
+def tunnel_patch(http):
+  """Tunnel PATCH requests over POST.
+
+  Args:
+     http - An instance of httplib2.Http
+         or something that acts like it.
+
+  Returns:
+     A modified instance of http that was passed in.
+
+  Example:
+
+    h = httplib2.Http()
+    h = tunnel_patch(h)
+
+  Useful if you are running on a platform that doesn't support PATCH.
+  Apply this last if you are using OAuth 1.0, as changing the method
+  will result in a different signature.
+  """
+  request_orig = http.request
+
+  # The closure that will replace 'httplib2.Http.request'.
+  def new_request(uri, method='GET', body=None, headers=None,
+                  redirections=httplib2.DEFAULT_MAX_REDIRECTS,
+                  connection_type=None):
+    """Rewrite PATCH requests as POST with an x-http-method-override header."""
+    if headers is None:
+      headers = {}
+    if method == 'PATCH':
+      if 'oauth_token' in headers.get('authorization', ''):
+        LOGGER.warning(
+            'OAuth 1.0 request made with Credentials after tunnel_patch.')
+      headers['x-http-method-override'] = "PATCH"
+      method = 'POST'
+    resp, content = request_orig(uri, method, body, headers,
+                        redirections, connection_type)
+    return resp, content
+
+  http.request = new_request
+  return http
diff --git a/utils/frozen_chromite/third_party/googleapiclient/mimeparse.py b/utils/frozen_chromite/third_party/googleapiclient/mimeparse.py
new file mode 100644
index 0000000..bc9ad09
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/mimeparse.py
@@ -0,0 +1,175 @@
+# Copyright 2014 Joe Gregorio
+#
+# Licensed under the MIT License
+
+"""MIME-Type Parser
+
+This module provides basic functions for handling mime-types. It can handle
+matching mime-types against a list of media-ranges. See section 14.1 of the
+HTTP specification [RFC 2616] for a complete explanation.
+
+   http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
+
+Contents:
+ - parse_mime_type():   Parses a mime-type into its component parts.
+ - parse_media_range(): Media-ranges are mime-types with wild-cards and a 'q'
+                          quality parameter.
+ - quality():           Determines the quality ('q') of a mime-type when
+                          compared against a list of media-ranges.
+ - quality_parsed():    Just like quality() except the second parameter must be
+                          pre-parsed.
+ - best_match():        Choose the mime-type with the highest quality ('q')
+                          from a list of candidates.
+"""
+from __future__ import absolute_import
+from functools import reduce
+import six
+
+__version__ = '0.1.3'
+__author__ = 'Joe Gregorio'
+__email__ = '[email protected]'
+__license__ = 'MIT License'
+__credits__ = ''
+
+
+def parse_mime_type(mime_type):
+    """Parses a mime-type into its component parts.
+
+    Carves up a mime-type and returns a tuple of the (type, subtype, params)
+    where 'params' is a dictionary of all the parameters for the media range.
+    For example, the media range 'application/xhtml;q=0.5' would get parsed
+    into:
+
+       ('application', 'xhtml', {'q': '0.5'})
+       """
+    parts = mime_type.split(';')
+    params = dict([tuple([s.strip() for s in param.split('=', 1)])\
+            for param in parts[1:]
+                  ])
+    full_type = parts[0].strip()
+    # Java URLConnection class sends an Accept header that includes a
+    # single '*'. Turn it into a legal wildcard.
+    if full_type == '*':
+        full_type = '*/*'
+    (type, subtype) = full_type.split('/')
+
+    return (type.strip(), subtype.strip(), params)
+
+
+def parse_media_range(range):
+    """Parse a media-range into its component parts.
+
+    Carves up a media range and returns a tuple of the (type, subtype,
+    params) where 'params' is a dictionary of all the parameters for the media
+    range.  For example, the media range 'application/*;q=0.5' would get parsed
+    into:
+
+       ('application', '*', {'q': '0.5'})
+
+    In addition this function also guarantees that there is a value for 'q'
+    in the params dictionary, filling it in with a proper default if
+    necessary.
+    """
+    (type, subtype, params) = parse_mime_type(range)
+    if 'q' not in params or not params['q'] or \
+            not float(params['q']) or float(params['q']) > 1\
+            or float(params['q']) < 0:
+        params['q'] = '1'
+
+    return (type, subtype, params)
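+
+# Worked examples of the default 'q' handling above:
+#
+#   parse_media_range('application/*;q=0.5')
+#       -> ('application', '*', {'q': '0.5'})
+#   parse_media_range('text/html')
+#       -> ('text', 'html', {'q': '1'})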
+
+
+def fitness_and_quality_parsed(mime_type, parsed_ranges):
+    """Find the best match for a mime-type amongst parsed media-ranges.
+
+    Find the best match for a given mime-type against a list of media_ranges
+    that have already been parsed by parse_media_range(). Returns a tuple of
+    the fitness value and the value of the 'q' quality parameter of the best
+    match, or (-1, 0) if no match was found. Just as for quality_parsed(),
+    'parsed_ranges' must be a list of parsed media ranges.
+    """
+    best_fitness = -1
+    best_fit_q = 0
+    (target_type, target_subtype, target_params) =\
+            parse_media_range(mime_type)
+    for (type, subtype, params) in parsed_ranges:
+        type_match = (type == target_type or\
+                      type == '*' or\
+                      target_type == '*')
+        subtype_match = (subtype == target_subtype or\
+                         subtype == '*' or\
+                         target_subtype == '*')
+        if type_match and subtype_match:
+            param_matches = reduce(lambda x, y: x + y, [1 for (key, value) in \
+                    six.iteritems(target_params) if key != 'q' and \
+                    key in params and value == params[key]], 0)
+            fitness = (type == target_type) and 100 or 0
+            fitness += (subtype == target_subtype) and 10 or 0
+            fitness += param_matches
+            if fitness > best_fitness:
+                best_fitness = fitness
+                best_fit_q = params['q']
+
+    return best_fitness, float(best_fit_q)
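+
+# A worked example of the fitness scoring above: an exact type match is worth
+# 100 points, an exact subtype match 10, plus one point per matching parameter.
+#
+#   ranges = [parse_media_range(r)
+#             for r in 'text/*;q=0.3, text/html;q=0.7'.split(',')]
+#   fitness_and_quality_parsed('text/html', ranges)  -> (110, 0.7)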
+
+
+def quality_parsed(mime_type, parsed_ranges):
+    """Find the best match for a mime-type amongst parsed media-ranges.
+
+    Find the best match for a given mime-type against a list of media_ranges
+    that have already been parsed by parse_media_range(). Returns the 'q'
+    quality parameter of the best match, 0 if no match was found. This function
+    behaves the same as quality() except that 'parsed_ranges' must be a list of
+    parsed media ranges.
+    """
+
+    return fitness_and_quality_parsed(mime_type, parsed_ranges)[1]
+
+
+def quality(mime_type, ranges):
+    """Return the quality ('q') of a mime-type against a list of media-ranges.
+
+    Returns the quality 'q' of a mime-type when compared against the
+    media-ranges in ranges. For example:
+
+    >>> quality('text/html',
+    ...         'text/*;q=0.3, text/html;q=0.7, text/html;level=1, '
+    ...         'text/html;level=2;q=0.4, */*;q=0.5')
+    0.7
+
+    """
+    parsed_ranges = [parse_media_range(r) for r in ranges.split(',')]
+
+    return quality_parsed(mime_type, parsed_ranges)
+
+
+def best_match(supported, header):
+    """Return mime-type with the highest quality ('q') from list of candidates.
+
+    Takes a list of supported mime-types and finds the best match for all the
+    media-ranges listed in header. The value of header must be a string that
+    conforms to the format of the HTTP Accept: header. The value of 'supported'
+    is a list of mime-types. The list of supported mime-types should be sorted
+    in order of increasing desirability, in case of a situation where there is
+    a tie.
+
+    >>> best_match(['application/xbel+xml', 'text/xml'],
+    ...            'text/*;q=0.5,*/*; q=0.1')
+    'text/xml'
+    """
+    split_header = _filter_blank(header.split(','))
+    parsed_header = [parse_media_range(r) for r in split_header]
+    weighted_matches = []
+    pos = 0
+    for mime_type in supported:
+        weighted_matches.append((fitness_and_quality_parsed(mime_type,
+                                 parsed_header), pos, mime_type))
+        pos += 1
+    weighted_matches.sort()
+
+    return weighted_matches[-1][0][1] and weighted_matches[-1][2] or ''
+
+
+def _filter_blank(i):
+    for s in i:
+        if s.strip():
+            yield s
diff --git a/utils/frozen_chromite/third_party/googleapiclient/model.py b/utils/frozen_chromite/third_party/googleapiclient/model.py
new file mode 100644
index 0000000..dded04e
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/model.py
@@ -0,0 +1,389 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Model objects for requests and responses.
+
+Each API may support one or more serializations, such
+as JSON, Atom, etc. The model classes are responsible
+for converting between the wire format and the Python
+object representation.
+"""
+from __future__ import absolute_import
+import six
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+import json
+import logging
+
+from six.moves.urllib.parse import urlencode
+
+from googleapiclient import __version__
+from googleapiclient.errors import HttpError
+
+
+LOGGER = logging.getLogger(__name__)
+
+dump_request_response = False
+
+
+def _abstract():
+  raise NotImplementedError('You need to override this function')
+
+
+class Model(object):
+  """Model base class.
+
+  All Model classes should implement this interface.
+  The Model serializes and de-serializes between a wire
+  format such as JSON and a Python object representation.
+  """
+
+  def request(self, headers, path_params, query_params, body_value):
+    """Updates outgoing requests with a serialized body.
+
+    Args:
+      headers: dict, request headers
+      path_params: dict, parameters that appear in the request path
+      query_params: dict, parameters that appear in the query
+      body_value: object, the request body as a Python object, which must be
+                  serializable.
+    Returns:
+      A tuple of (headers, path_params, query, body)
+
+      headers: dict, request headers
+      path_params: dict, parameters that appear in the request path
+      query: string, query part of the request URI
+      body: string, the body serialized in the desired wire format.
+    """
+    _abstract()
+
+  def response(self, resp, content):
+    """Convert the response wire format into a Python object.
+
+    Args:
+      resp: httplib2.Response, the HTTP response headers and status
+      content: string, the body of the HTTP response
+
+    Returns:
+      The body de-serialized as a Python object.
+
+    Raises:
+      googleapiclient.errors.HttpError if a non 2xx response is received.
+    """
+    _abstract()
+
+
+class BaseModel(Model):
+  """Base model class.
+
+  Subclasses should provide implementations for the "serialize" and
+  "deserialize" methods, as well as values for the following class attributes.
+
+  Attributes:
+    accept: The value to use for the HTTP Accept header.
+    content_type: The value to use for the HTTP Content-type header.
+    no_content_response: The value to return when deserializing a 204 "No
+        Content" response.
+    alt_param: The value to supply as the "alt" query parameter for requests.
+  """
+
+  accept = None
+  content_type = None
+  no_content_response = None
+  alt_param = None
+
+  def _log_request(self, headers, path_params, query, body):
+    """Logs debugging information about the request if requested."""
+    if dump_request_response:
+      LOGGER.info('--request-start--')
+      LOGGER.info('-headers-start-')
+      for h, v in six.iteritems(headers):
+        LOGGER.info('%s: %s', h, v)
+      LOGGER.info('-headers-end-')
+      LOGGER.info('-path-parameters-start-')
+      for h, v in six.iteritems(path_params):
+        LOGGER.info('%s: %s', h, v)
+      LOGGER.info('-path-parameters-end-')
+      LOGGER.info('body: %s', body)
+      LOGGER.info('query: %s', query)
+      LOGGER.info('--request-end--')
+
+  def request(self, headers, path_params, query_params, body_value):
+    """Updates outgoing requests with a serialized body.
+
+    Args:
+      headers: dict, request headers
+      path_params: dict, parameters that appear in the request path
+      query_params: dict, parameters that appear in the query
+      body_value: object, the request body as a Python object, which must be
+                  serializable by json.
+    Returns:
+      A tuple of (headers, path_params, query, body)
+
+      headers: dict, request headers
+      path_params: dict, parameters that appear in the request path
+      query: string, query part of the request URI
+      body: string, the body serialized as JSON
+    """
+    query = self._build_query(query_params)
+    headers['accept'] = self.accept
+    headers['accept-encoding'] = 'gzip, deflate'
+    if 'user-agent' in headers:
+      headers['user-agent'] += ' '
+    else:
+      headers['user-agent'] = ''
+    headers['user-agent'] += 'google-api-python-client/%s (gzip)' % __version__
+
+    if body_value is not None:
+      headers['content-type'] = self.content_type
+      body_value = self.serialize(body_value)
+    self._log_request(headers, path_params, query, body_value)
+    return (headers, path_params, query, body_value)
+
+  def _build_query(self, params):
+    """Builds a query string.
+
+    Args:
+      params: dict, the query parameters
+
+    Returns:
+      The query parameters properly encoded into an HTTP URI query string.
+    """
+    if self.alt_param is not None:
+      params.update({'alt': self.alt_param})
+    astuples = []
+    for key, value in six.iteritems(params):
+      if type(value) == type([]):
+        for x in value:
+          x = x.encode('utf-8')
+          astuples.append((key, x))
+      else:
+        if isinstance(value, six.text_type) and callable(value.encode):
+          value = value.encode('utf-8')
+        astuples.append((key, value))
+    return '?' + urlencode(astuples)
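+
+  # A worked example (alt_param is 'json' for JsonModel; the parameter order in
+  # the encoded string may vary):
+  #
+  #   JsonModel()._build_query({'maxResults': 10})
+  #       -> '?maxResults=10&alt=json'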
+
+  def _log_response(self, resp, content):
+    """Logs debugging information about the response if requested."""
+    if dump_request_response:
+      LOGGER.info('--response-start--')
+      for h, v in six.iteritems(resp):
+        LOGGER.info('%s: %s', h, v)
+      if content:
+        LOGGER.info(content)
+      LOGGER.info('--response-end--')
+
+  def response(self, resp, content):
+    """Convert the response wire format into a Python object.
+
+    Args:
+      resp: httplib2.Response, the HTTP response headers and status
+      content: string, the body of the HTTP response
+
+    Returns:
+      The body de-serialized as a Python object.
+
+    Raises:
+      googleapiclient.errors.HttpError if a non 2xx response is received.
+    """
+    self._log_response(resp, content)
+    # Error handling is TBD, for example, do we retry
+    # for some operation/error combinations?
+    if resp.status < 300:
+      if resp.status == 204:
+        # A 204: No Content response should be treated differently
+        # to all the other success states
+        return self.no_content_response
+      return self.deserialize(content)
+    else:
+      LOGGER.debug('Content from bad request was: %s' % content)
+      raise HttpError(resp, content)
+
+  def serialize(self, body_value):
+    """Perform the actual Python object serialization.
+
+    Args:
+      body_value: object, the request body as a Python object.
+
+    Returns:
+      string, the body in serialized form.
+    """
+    _abstract()
+
+  def deserialize(self, content):
+    """Perform the actual deserialization from response string to Python
+    object.
+
+    Args:
+      content: string, the body of the HTTP response
+
+    Returns:
+      The body de-serialized as a Python object.
+    """
+    _abstract()
+
+
+class JsonModel(BaseModel):
+  """Model class for JSON.
+
+  Serializes and de-serializes between JSON and the Python
+  object representation of HTTP request and response bodies.
+  """
+  accept = 'application/json'
+  content_type = 'application/json'
+  alt_param = 'json'
+
+  def __init__(self, data_wrapper=False):
+    """Construct a JsonModel.
+
+    Args:
+      data_wrapper: boolean, wrap requests and responses in a data wrapper
+    """
+    self._data_wrapper = data_wrapper
+
+  def serialize(self, body_value):
+    if (isinstance(body_value, dict) and 'data' not in body_value and
+        self._data_wrapper):
+      body_value = {'data': body_value}
+    return json.dumps(body_value)
+
+  def deserialize(self, content):
+    try:
+        content = content.decode('utf-8')
+    except AttributeError:
+        pass
+    body = json.loads(content)
+    if self._data_wrapper and isinstance(body, dict) and 'data' in body:
+      body = body['data']
+    return body
+
+  @property
+  def no_content_response(self):
+    return {}
+
+
+class RawModel(JsonModel):
+  """Model class for requests that don't return JSON.
+
+  Serializes and de-serializes between JSON and the Python
+  object representation of HTTP request, and returns the raw bytes
+  of the response body.
+  """
+  accept = '*/*'
+  content_type = 'application/json'
+  alt_param = None
+
+  def deserialize(self, content):
+    return content
+
+  @property
+  def no_content_response(self):
+    return ''
+
+
+class MediaModel(JsonModel):
+  """Model class for requests that return Media.
+
+  Serializes and de-serializes between JSON and the Python
+  object representation of HTTP request, and returns the raw bytes
+  of the response body.
+  """
+  accept = '*/*'
+  content_type = 'application/json'
+  alt_param = 'media'
+
+  def deserialize(self, content):
+    return content
+
+  @property
+  def no_content_response(self):
+    return ''
+
+
+class ProtocolBufferModel(BaseModel):
+  """Model class for protocol buffers.
+
+  Serializes and de-serializes the binary protocol buffer sent in the HTTP
+  request and response bodies.
+  """
+  accept = 'application/x-protobuf'
+  content_type = 'application/x-protobuf'
+  alt_param = 'proto'
+
+  def __init__(self, protocol_buffer):
+    """Constructs a ProtocolBufferModel.
+
+    The serialized protocol buffer returned in an HTTP response will be
+    de-serialized using the given protocol buffer class.
+
+    Args:
+      protocol_buffer: The protocol buffer class used to de-serialize a
+      response from the API.
+    """
+    self._protocol_buffer = protocol_buffer
+
+  def serialize(self, body_value):
+    return body_value.SerializeToString()
+
+  def deserialize(self, content):
+    return self._protocol_buffer.FromString(content)
+
+  @property
+  def no_content_response(self):
+    return self._protocol_buffer()
+
+
+def makepatch(original, modified):
+  """Create a patch object.
+
+  Some methods support PATCH, an efficient way to send updates to a resource.
+  This method allows the easy construction of patch bodies by looking at the
+  differences between a resource before and after it was modified.
+
+  Args:
+    original: object, the original deserialized resource
+    modified: object, the modified deserialized resource
+  Returns:
+    An object that contains only the changes from original to modified, in a
+    form suitable to pass to a PATCH method.
+
+  Example usage:
+    item = service.activities().get(postid=postid, userid=userid).execute()
+    original = copy.deepcopy(item)
+    item['object']['content'] = 'This is updated.'
+    service.activities.patch(postid=postid, userid=userid,
+      body=makepatch(original, item)).execute()
+  """
+  patch = {}
+  for key, original_value in six.iteritems(original):
+    modified_value = modified.get(key, None)
+    if modified_value is None:
+      # Use None to signal that the element is deleted
+      patch[key] = None
+    elif original_value != modified_value:
+      if type(original_value) == type({}):
+        # Recursively descend objects
+        patch[key] = makepatch(original_value, modified_value)
+      else:
+        # In the case of simple types or arrays we just replace
+        patch[key] = modified_value
+    else:
+      # Don't add anything to patch if there's no change
+      pass
+  for key in modified:
+    if key not in original:
+      patch[key] = modified[key]
+
+  return patch
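For orientation, here is a minimal sketch (not part of the imported file) of
how JsonModel and makepatch from the module above fit together; it assumes
frozen_chromite's third_party directory is on sys.path so `googleapiclient`
is importable:

  from googleapiclient.model import JsonModel, makepatch

  model = JsonModel(data_wrapper=False)
  original = {'title': 'old', 'tags': ['a']}
  modified = {'title': 'new', 'tags': ['a']}
  patch_body = makepatch(original, modified)  # only changed keys: {'title': 'new'}
  wire_body = model.serialize(patch_body)     # '{"title": "new"}'
  assert model.deserialize(wire_body) == patch_body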
diff --git a/utils/frozen_chromite/third_party/googleapiclient/sample_tools.py b/utils/frozen_chromite/third_party/googleapiclient/sample_tools.py
new file mode 100644
index 0000000..2b4e7b4
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/sample_tools.py
@@ -0,0 +1,103 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for making samples.
+
+Consolidates a lot of code commonly repeated in sample applications.
+"""
+from __future__ import absolute_import
+
+__author__ = '[email protected] (Joe Gregorio)'
+__all__ = ['init']
+
+
+import argparse
+import httplib2
+import os
+
+from googleapiclient import discovery
+from oauth2client import client
+from oauth2client import file
+from oauth2client import tools
+
+
+def init(argv, name, version, doc, filename, scope=None, parents=[], discovery_filename=None):
+  """A common initialization routine for samples.
+
+  Many of the sample applications do the same initialization, which has now
+  been consolidated into this function. This function uses common idioms found
+  in almost all the samples, i.e. for an API with name 'apiname', the
+  credentials are stored in a file named apiname.dat, and the
+  client_secrets.json file is stored in the same directory as the application
+  main file.
+
+  Args:
+    argv: list of string, the command-line parameters of the application.
+    name: string, name of the API.
+    version: string, version of the API.
+    doc: string, description of the application. Usually set to __doc__.
+    filename: string, filename of the application. Usually set to __file__.
+    parents: list of argparse.ArgumentParser, additional command-line flags.
+    scope: string, The OAuth scope used.
+    discovery_filename: string, name of local discovery file (JSON). Use when discovery doc not available via URL.
+
+  Returns:
+    A tuple of (service, flags), where service is the service object and flags
+    is the parsed command-line flags.
+  """
+  if scope is None:
+    scope = 'https://www.googleapis.com/auth/' + name
+
+  # Parse command-line arguments.
+  parent_parsers = [tools.argparser]
+  parent_parsers.extend(parents)
+  parser = argparse.ArgumentParser(
+      description=doc,
+      formatter_class=argparse.RawDescriptionHelpFormatter,
+      parents=parent_parsers)
+  flags = parser.parse_args(argv[1:])
+
+  # Name of a file containing the OAuth 2.0 information for this
+  # application, including client_id and client_secret, which are found
+  # on the API Access tab on the Google APIs
+  # Console <http://code.google.com/apis/console>.
+  client_secrets = os.path.join(os.path.dirname(filename),
+                                'client_secrets.json')
+
+  # Set up a Flow object to be used if we need to authenticate.
+  flow = client.flow_from_clientsecrets(client_secrets,
+      scope=scope,
+      message=tools.message_if_missing(client_secrets))
+
+  # Prepare credentials, and authorize HTTP object with them.
+  # If the credentials don't exist or are invalid run through the native client
+  # flow. The Storage object will ensure that if successful the good
+  # credentials will get written back to a file.
+  storage = file.Storage(name + '.dat')
+  credentials = storage.get()
+  if credentials is None or credentials.invalid:
+    credentials = tools.run_flow(flow, storage, flags)
+  http = credentials.authorize(http = httplib2.Http())
+
+  if discovery_filename is None:
+    # Construct a service object via the discovery service.
+    service = discovery.build(name, version, http=http)
+  else:
+    # Construct a service object using a local discovery document file.
+    with open(discovery_filename) as discovery_file:
+      service = discovery.build_from_document(
+          discovery_file.read(),
+          base='https://www.googleapis.com/',
+          http=http)
+  return (service, flags)
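A hedged sketch of how a sample script would call init(); 'urlshortener'/'v1'
are placeholder API name and version, and a client_secrets.json file is
assumed to sit next to the script for the OAuth flow:

  import sys
  from googleapiclient import sample_tools

  def main(argv):
    # Runs the argparse + OAuth flow and returns a discovery-built client.
    service, flags = sample_tools.init(
        argv, 'urlshortener', 'v1', __doc__, __file__,
        scope='https://www.googleapis.com/auth/urlshortener')
    return service, flags

  if __name__ == '__main__':
    main(sys.argv)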
diff --git a/utils/frozen_chromite/third_party/googleapiclient/schema.py b/utils/frozen_chromite/third_party/googleapiclient/schema.py
new file mode 100644
index 0000000..9feaf28
--- /dev/null
+++ b/utils/frozen_chromite/third_party/googleapiclient/schema.py
@@ -0,0 +1,318 @@
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Schema processing for discovery based APIs
+
+Schemas holds an APIs discovery schemas. It can return those schema as
+deserialized JSON objects, or pretty print them as prototype objects that
+conform to the schema.
+
+For example, given the schema:
+
+ schema = \"\"\"{
+   "Foo": {
+    "type": "object",
+    "properties": {
+     "etag": {
+      "type": "string",
+      "description": "ETag of the collection."
+     },
+     "kind": {
+      "type": "string",
+      "description": "Type of the collection ('calendar#acl').",
+      "default": "calendar#acl"
+     },
+     "nextPageToken": {
+      "type": "string",
+      "description": "Token used to access the next
+         page of this result. Omitted if no further results are available."
+     }
+    }
+   }
+ }\"\"\"
+
+ s = Schemas(schema)
+ print s.prettyPrintByName('Foo')
+
+ Produces the following output:
+
+  {
+   "nextPageToken": "A String", # Token used to access the
+       # next page of this result. Omitted if no further results are available.
+   "kind": "A String", # Type of the collection ('calendar#acl').
+   "etag": "A String", # ETag of the collection.
+  },
+
+The constructor takes a discovery document in which to look up named schema.
+"""
+from __future__ import absolute_import
+import six
+
+# TODO(jcgregorio) support format, enum, minimum, maximum
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+import copy
+
+# Oauth2client < 3 has the positional helper in 'util', >= 3 has it
+# in '_helpers'.
+try:
+  from oauth2client import util
+except ImportError:
+  from oauth2client import _helpers as util
+
+
+class Schemas(object):
+  """Schemas for an API."""
+
+  def __init__(self, discovery):
+    """Constructor.
+
+    Args:
+      discovery: object, Deserialized discovery document from which we pull
+        out the named schema.
+    """
+    self.schemas = discovery.get('schemas', {})
+
+    # Cache of pretty printed schemas.
+    self.pretty = {}
+
+  @util.positional(2)
+  def _prettyPrintByName(self, name, seen=None, dent=0):
+    """Get pretty printed object prototype from the schema name.
+
+    Args:
+      name: string, Name of schema in the discovery document.
+      seen: list of string, Names of schema already seen. Used to handle
+        recursive definitions.
+
+    Returns:
+      string, A string that contains a prototype object with
+        comments that conforms to the given schema.
+    """
+    if seen is None:
+      seen = []
+
+    if name in seen:
+      # Do not fall into an infinite loop over recursive definitions.
+      return '# Object with schema name: %s' % name
+    seen.append(name)
+
+    if name not in self.pretty:
+      self.pretty[name] = _SchemaToStruct(self.schemas[name],
+          seen, dent=dent).to_str(self._prettyPrintByName)
+
+    seen.pop()
+
+    return self.pretty[name]
+
+  def prettyPrintByName(self, name):
+    """Get pretty printed object prototype from the schema name.
+
+    Args:
+      name: string, Name of schema in the discovery document.
+
+    Returns:
+      string, A string that contains a prototype object with
+        comments that conforms to the given schema.
+    """
+    # Return with trailing comma and newline removed.
+    return self._prettyPrintByName(name, seen=[], dent=1)[:-2]
+
+  @util.positional(2)
+  def _prettyPrintSchema(self, schema, seen=None, dent=0):
+    """Get pretty printed object prototype of schema.
+
+    Args:
+      schema: object, Parsed JSON schema.
+      seen: list of string, Names of schema already seen. Used to handle
+        recursive definitions.
+
+    Returns:
+      string, A string that contains a prototype object with
+        comments that conforms to the given schema.
+    """
+    if seen is None:
+      seen = []
+
+    return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)
+
+  def prettyPrintSchema(self, schema):
+    """Get pretty printed object prototype of schema.
+
+    Args:
+      schema: object, Parsed JSON schema.
+
+    Returns:
+      string, A string that contains a prototype object with
+        comments that conforms to the given schema.
+    """
+    # Return with trailing comma and newline removed.
+    return self._prettyPrintSchema(schema, dent=1)[:-2]
+
+  def get(self, name):
+    """Get deserialized JSON schema from the schema name.
+
+    Args:
+      name: string, Schema name.
+    """
+    return self.schemas[name]
+
+
+class _SchemaToStruct(object):
+  """Convert schema to a prototype object."""
+
+  @util.positional(3)
+  def __init__(self, schema, seen, dent=0):
+    """Constructor.
+
+    Args:
+      schema: object, Parsed JSON schema.
+      seen: list, List of names of schema already seen while parsing. Used to
+        handle recursive definitions.
+      dent: int, Initial indentation depth.
+    """
+    # The result of this parsing kept as list of strings.
+    self.value = []
+
+    # The final value of the parsing.
+    self.string = None
+
+    # The parsed JSON schema.
+    self.schema = schema
+
+    # Indentation level.
+    self.dent = dent
+
+    # Method that when called returns a prototype object for the schema with
+    # the given name.
+    self.from_cache = None
+
+    # List of names of schema already seen while parsing.
+    self.seen = seen
+
+  def emit(self, text):
+    """Add text as a line to the output.
+
+    Args:
+      text: string, Text to output.
+    """
+    self.value.extend(["  " * self.dent, text, '\n'])
+
+  def emitBegin(self, text):
+    """Add text to the output, but with no line terminator.
+
+    Args:
+      text: string, Text to output.
+      """
+    self.value.extend(["  " * self.dent, text])
+
+  def emitEnd(self, text, comment):
+    """Add text and comment to the output with line terminator.
+
+    Args:
+      text: string, Text to output.
+      comment: string, Python comment.
+    """
+    if comment:
+      divider = '\n' + '  ' * (self.dent + 2) + '# '
+      lines = comment.splitlines()
+      lines = [x.rstrip() for x in lines]
+      comment = divider.join(lines)
+      self.value.extend([text, ' # ', comment, '\n'])
+    else:
+      self.value.extend([text, '\n'])
+
+  def indent(self):
+    """Increase indentation level."""
+    self.dent += 1
+
+  def undent(self):
+    """Decrease indentation level."""
+    self.dent -= 1
+
+  def _to_str_impl(self, schema):
+    """Prototype object based on the schema, in Python code with comments.
+
+    Args:
+      schema: object, Parsed JSON schema file.
+
+    Returns:
+      Prototype object based on the schema, in Python code with comments.
+    """
+    stype = schema.get('type')
+    if stype == 'object':
+      self.emitEnd('{', schema.get('description', ''))
+      self.indent()
+      if 'properties' in schema:
+        for pname, pschema in six.iteritems(schema.get('properties', {})):
+          self.emitBegin('"%s": ' % pname)
+          self._to_str_impl(pschema)
+      elif 'additionalProperties' in schema:
+        self.emitBegin('"a_key": ')
+        self._to_str_impl(schema['additionalProperties'])
+      self.undent()
+      self.emit('},')
+    elif '$ref' in schema:
+      schemaName = schema['$ref']
+      description = schema.get('description', '')
+      s = self.from_cache(schemaName, seen=self.seen)
+      parts = s.splitlines()
+      self.emitEnd(parts[0], description)
+      for line in parts[1:]:
+        self.emit(line.rstrip())
+    elif stype == 'boolean':
+      value = schema.get('default', 'True or False')
+      self.emitEnd('%s,' % str(value), schema.get('description', ''))
+    elif stype == 'string':
+      value = schema.get('default', 'A String')
+      self.emitEnd('"%s",' % str(value), schema.get('description', ''))
+    elif stype == 'integer':
+      value = schema.get('default', '42')
+      self.emitEnd('%s,' % str(value), schema.get('description', ''))
+    elif stype == 'number':
+      value = schema.get('default', '3.14')
+      self.emitEnd('%s,' % str(value), schema.get('description', ''))
+    elif stype == 'null':
+      self.emitEnd('None,', schema.get('description', ''))
+    elif stype == 'any':
+      self.emitEnd('"",', schema.get('description', ''))
+    elif stype == 'array':
+      self.emitEnd('[', schema.get('description'))
+      self.indent()
+      self.emitBegin('')
+      self._to_str_impl(schema['items'])
+      self.undent()
+      self.emit('],')
+    else:
+      self.emit('Unknown type! %s' % stype)
+      self.emitEnd('', '')
+
+    self.string = ''.join(self.value)
+    return self.string
+
+  def to_str(self, from_cache):
+    """Prototype object based on the schema, in Python code with comments.
+
+    Args:
+      from_cache: callable(name, seen), Callable that retrieves an object
+         prototype for a schema with the given name. Seen is a list of schema
+         names already seen as we recursively descend the schema definition.
+
+    Returns:
+      Prototype object based on the schema, in Python code with comments.
+      The lines of the code will all be properly indented.
+    """
+    self.from_cache = from_cache
+    return self._to_str_impl(self.schema)
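As a quick illustration (not part of the file), the Schemas class can be
driven with a hand-built discovery fragment; this assumes oauth2client is
importable, since schema.py depends on its positional helper:

  from googleapiclient.schema import Schemas

  discovery = {'schemas': {'Foo': {
      'type': 'object',
      'properties': {
          'etag': {'type': 'string', 'description': 'ETag of the collection.'},
      },
  }}}
  s = Schemas(discovery)
  print(s.prettyPrintByName('Foo'))  # prototype object with inline comments
  print(s.get('Foo')['type'])        # 'object'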
diff --git a/utils/frozen_chromite/third_party/infra_libs/.coveragerc b/utils/frozen_chromite/third_party/infra_libs/.coveragerc
new file mode 100644
index 0000000..741eb75
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/.coveragerc
@@ -0,0 +1,27 @@
+# This file exists despite infra_libs having its own .coveragerc because, on
+# Windows bots, we only execute tests in certain modules of infra_libs
+# (including this one), and the latest version of coverage throws an exception
+# when given a non-existent config file.
+
+[run]
+include = ./packages/infra_libs/infra_libs/*
+
+[report]
+exclude_lines =
+    # Have to re-enable the standard pragma
+    pragma: no cover
+
+    # Don't complain about missing debug-only code:
+    def __repr__
+    if self\.debug
+
+    # Don't complain if tests don't hit defensive assertion code:
+    raise AssertionError
+    raise NotImplementedError
+
+    # Don't complain if non-runnable code isn't run:
+    if 0:
+    if __name__ == ['"]__main__['"]:
+
+[expect_tests]
+expected_coverage_min = 100
diff --git a/utils/frozen_chromite/third_party/infra_libs/__init__.py b/utils/frozen_chromite/third_party/infra_libs/__init__.py
new file mode 100644
index 0000000..ccfee2f
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/__init__.py
@@ -0,0 +1,8 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import ts_mon  # Must be imported first so httplib2_utils can import it.
+
+from infra_libs.httplib2_utils import RetriableHttp, InstrumentedHttp, HttpMock
+from infra_libs.utils import temporary_directory
diff --git a/utils/frozen_chromite/third_party/infra_libs/httplib2_utils.py b/utils/frozen_chromite/third_party/infra_libs/httplib2_utils.py
new file mode 100644
index 0000000..034e1ec
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/httplib2_utils.py
@@ -0,0 +1,278 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import base64
+import collections
+import copy
+import json
+import logging
+import re
+import socket
+import time
+
+import httplib2
+import oauth2client.client
+import six
+from six.moves import http_client as httplib
+
+from googleapiclient import errors
+from infra_libs.ts_mon.common import http_metrics
+
+# TODO(nxia): crbug.com/790760 upgrade oauth2client to 4.1.2.
+oauth2client_util_imported = False
+try:
+  from oauth2client import util
+  oauth2client_util_imported = True
+except ImportError:
+  pass
+
+
+# default timeout for http requests, in seconds
+DEFAULT_TIMEOUT = 30
+
+
+class AuthError(Exception):
+  pass
+
+
+class DelegateServiceAccountCredentials(
+    oauth2client.client.AssertionCredentials):
+  """Authorizes an HTTP client with a service account for which we are an actor.
+
+  This class uses the IAM API to sign a JWT with the private key of another
+  service account for which we have the "Service Account Actor" role.
+  """
+
+  MAX_TOKEN_LIFETIME_SECS = 3600 # 1 hour in seconds
+  _SIGN_BLOB_URL = 'https://iam.googleapis.com/v1/%s:signBlob'
+
+  def __init__(self, http, service_account_email, scopes, project='-'):
+    """
+    Args:
+      http: An httplib2.Http object that is authorized by another
+        oauth2client.client.OAuth2Credentials with credentials that have the
+        service account actor role on the service_account_email.
+      service_account_email: The email address of the service account for which
+        to obtain an access token.
+      scopes: The desired scopes for the token.
+      project: The cloud project to which service_account_email belongs.  The
+        default of '-' makes the IAM API figure it out for us.
+    """
+    if not oauth2client_util_imported:
+      raise AssertionError('Failed to import oauth2client.util.')
+    super(DelegateServiceAccountCredentials, self).__init__(None)
+    self._service_account_email = service_account_email
+    self._scopes = util.scopes_to_string(scopes)
+    self._http = http
+    self._name = 'projects/%s/serviceAccounts/%s' % (
+        project, service_account_email)
+
+  def sign_blob(self, blob):
+    response, content = self._http.request(
+        self._SIGN_BLOB_URL % self._name,
+        method='POST',
+        body=json.dumps({'bytesToSign': base64.b64encode(blob)}),
+        headers={'Content-Type': 'application/json'})
+    if response.status != 200:
+      raise AuthError('Failed to sign blob as %s: %d %s' % (
+          self._service_account_email, response.status, response.reason))
+
+    data = json.loads(content)
+    return data['keyId'], data['signature']
+
+  def _generate_assertion(self):
+    # This is copied with small modifications from
+    # oauth2client.service_account._ServiceAccountCredentials.
+
+    header = {
+        'alg': 'RS256',
+        'typ': 'JWT',
+    }
+
+    now = int(time.time())
+    payload = {
+        'aud': self.token_uri,
+        'scope': self._scopes,
+        'iat': now,
+        'exp': now + self.MAX_TOKEN_LIFETIME_SECS,
+        'iss': self._service_account_email,
+    }
+
+    assertion_input = (
+        self._urlsafe_b64encode(header) + b'.' +
+        self._urlsafe_b64encode(payload))
+
+    # Sign the assertion.
+    _, rsa_bytes = self.sign_blob(assertion_input)
+    signature = rsa_bytes.rstrip(b'=')
+
+    return assertion_input + b'.' + signature
+
+  def _urlsafe_b64encode(self, data):
+    # Copied verbatim from oauth2client.service_account.
+    return base64.urlsafe_b64encode(
+        json.dumps(data, separators=(',', ':')).encode('UTF-8')).rstrip(b'=')
+
+
+class RetriableHttp(object):
+  """A httplib2.Http object that retries on failure."""
+
+  def __init__(self, http, max_tries=5, backoff_time=1,
+               retrying_statuses_fn=None):
+    """
+    Args:
+      http: an httplib2.Http instance
+      max_tries: maximum number of attempts
+      backoff_time: number of seconds to sleep between retries
+      retrying_statuses_fn: a function that returns True if a given status
+                            should be retried
+    """
+    self._http = http
+    self._max_tries = max_tries
+    self._backoff_time = backoff_time
+    self._retrying_statuses_fn = retrying_statuses_fn or \
+                                 set(range(500,599)).__contains__
+
+  def request(self, uri, method='GET', body=None, *args, **kwargs):
+    for i in range(1, self._max_tries + 1):
+      try:
+        response, content = self._http.request(uri, method, body, *args,
+                                               **kwargs)
+
+        if self._retrying_statuses_fn(response.status):
+          logging.info('RetriableHttp: attempt %d receiving status %d, %s',
+                       i, response.status,
+                       'final attempt' if i == self._max_tries else \
+                       'will retry')
+        else:
+          break
+      except (ValueError, errors.Error,
+              socket.timeout, socket.error, socket.herror, socket.gaierror,
+              httplib2.HttpLib2Error) as error:
+        logging.info('RetriableHttp: attempt %d received exception: %s, %s',
+                     i, error, 'final attempt' if i == self._max_tries else \
+                     'will retry')
+        if i == self._max_tries:
+          raise
+      time.sleep(self._backoff_time)
+
+    return response, content
+
+  def __getattr__(self, name):
+    return getattr(self._http, name)
+
+  def __setattr__(self, name, value):
+    if name in ('request', '_http', '_max_tries', '_backoff_time',
+                '_retrying_statuses_fn'):
+      self.__dict__[name] = value
+    else:
+      setattr(self._http, name, value)
+
+
+class InstrumentedHttp(httplib2.Http):
+  """A httplib2.Http object that reports ts_mon metrics about its requests."""
+
+  def __init__(self, name, time_fn=time.time, timeout=DEFAULT_TIMEOUT,
+               **kwargs):
+    """
+    Args:
+      name: An identifier for the HTTP requests made by this object.
+      time_fn: Function returning the current time in seconds. Use for testing
+        purposes only.
+    """
+
+    super(InstrumentedHttp, self).__init__(timeout=timeout, **kwargs)
+    self.fields = {'name': name, 'client': 'httplib2'}
+    self.time_fn = time_fn
+
+  def _update_metrics(self, status, start_time):
+    status_fields = {'status': status}
+    status_fields.update(self.fields)
+    http_metrics.response_status.increment(fields=status_fields)
+
+    duration_msec = (self.time_fn() - start_time) * 1000
+    http_metrics.durations.add(duration_msec, fields=self.fields)
+
+  def request(self, uri, method="GET", body=None, *args, **kwargs):
+    request_bytes = 0
+    if body is not None:
+      request_bytes = len(body)
+    http_metrics.request_bytes.add(request_bytes, fields=self.fields)
+
+    start_time = self.time_fn()
+    try:
+      response, content = super(InstrumentedHttp, self).request(
+          uri, method, body, *args, **kwargs)
+    except socket.timeout:
+      self._update_metrics(http_metrics.STATUS_TIMEOUT, start_time)
+      raise
+    except (socket.error, socket.herror, socket.gaierror):
+      self._update_metrics(http_metrics.STATUS_ERROR, start_time)
+      raise
+    except (httplib.HTTPException, httplib2.HttpLib2Error) as ex:
+      status = http_metrics.STATUS_EXCEPTION
+      if 'Deadline exceeded while waiting for HTTP response' in str(ex):
+        # Raised on Appengine (gae_override/httplib.py).
+        status = http_metrics.STATUS_TIMEOUT
+      self._update_metrics(status, start_time)
+      raise
+    http_metrics.response_bytes.add(len(content), fields=self.fields)
+
+    self._update_metrics(response.status, start_time)
+
+    return response, content
+
+
+class HttpMock(object):
+  """Mock of httplib2.Http"""
+  HttpCall = collections.namedtuple('HttpCall', ('uri', 'method', 'body',
+                                                 'headers'))
+
+  def __init__(self, uris):
+    """
+    Args:
+      uris (list): list of (uri, headers, body) tuples. `uri` is a regexp
+        matched against the requested uri; (headers, body) gives the values
+        returned by the mock. Uris are tested in the order given in `uris`.
+        `headers` is a dict mapping header names to values. The 'status' key
+        is mandatory. `body` is a string.
+        Ex: [('.*', {'status': 200}, 'nicely done.')]
+    """
+    self._uris = []
+    self.requests_made = []
+
+    for value in uris:
+      if not isinstance(value, (list, tuple)) or len(value) != 3:
+        raise ValueError("'uris' must be a sequence of (uri, headers, body)")
+      uri, headers, body = value
+      compiled_uri = re.compile(uri)
+      if not isinstance(headers, dict):
+        raise TypeError("'headers' must be a dict")
+      if not 'status' in headers:
+        raise ValueError("'headers' must have 'status' as a key")
+
+      new_headers = copy.copy(headers)
+      new_headers['status'] = int(new_headers['status'])
+
+      if not isinstance(body, six.string_types):
+        raise TypeError("'body' must be a string, got %s" % type(body))
+      self._uris.append((compiled_uri, new_headers, body))
+
+  # pylint: disable=unused-argument
+  def request(self, uri,
+              method='GET',
+              body=None,
+              headers=None,
+              redirections=1,
+              connection_type=None):
+    self.requests_made.append(self.HttpCall(uri, method, body, headers))
+    headers = None
+    body = None
+    for candidate in self._uris:
+      if candidate[0].match(uri):
+        _, headers, body = candidate
+        break
+    if not headers:
+      raise AssertionError("Unexpected request to %s" % uri)
+    return httplib2.Response(headers), body
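For reference, a small usage sketch (not part of the imported file):
RetriableHttp wraps a real client to retry 5xx responses, while HttpMock
fakes the transport in tests. The example.com URI and the canned response
are made up, and backoff_time=0 is only to keep the sketch fast:

  import httplib2
  from infra_libs.httplib2_utils import RetriableHttp, HttpMock

  # Production: retry transient 5xx responses from a real httplib2 client.
  http = RetriableHttp(httplib2.Http(), max_tries=3, backoff_time=0)

  # Tests: answer any matching URI with canned headers/body and record calls.
  mock = HttpMock([(r'.*example\.com.*', {'status': 200}, 'hello')])
  response, content = mock.request('http://example.com/x', method='GET')
  assert response.status == 200 and content == 'hello'
  assert mock.requests_made[0].uri == 'http://example.com/x'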
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/.coveragerc b/utils/frozen_chromite/third_party/infra_libs/ts_mon/.coveragerc
new file mode 100644
index 0000000..4bcf905
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/.coveragerc
@@ -0,0 +1,27 @@
+# This file exists despite infra_libs having its own .coveragerc because, on
+# Windows bots, we only execute tests in certain modules of infra_libs
+# (including this one), and the latest version of coverage throws an exception
+# when given a non-existent config file.
+
+[run]
+include = ./packages/infra_libs/infra_libs/ts_mon/*
+
+[report]
+exclude_lines =
+    # Have to re-enable the standard pragma
+    pragma: no cover
+
+    # Don't complain about missing debug-only code:
+    def __repr__
+    if self\.debug
+
+    # Don't complain if tests don't hit defensive assertion code:
+    raise AssertionError
+    raise NotImplementedError
+
+    # Don't complain if non-runnable code isn't run:
+    if 0:
+    if __name__ == ['"]__main__['"]:
+
+[expect_tests]
+expected_coverage_min = 100
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/__init__.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/__init__.py
new file mode 100644
index 0000000..4a60d3d
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/__init__.py
@@ -0,0 +1,46 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from infra_libs.ts_mon.config import add_argparse_options
+from infra_libs.ts_mon.config import process_argparse_options
+
+from infra_libs.ts_mon.common.distribution import Distribution
+from infra_libs.ts_mon.common.distribution import FixedWidthBucketer
+from infra_libs.ts_mon.common.distribution import GeometricBucketer
+
+from infra_libs.ts_mon.common.errors import MonitoringError
+from infra_libs.ts_mon.common.errors import MonitoringDecreasingValueError
+from infra_libs.ts_mon.common.errors import MonitoringDuplicateRegistrationError
+from infra_libs.ts_mon.common.errors import MonitoringIncrementUnsetValueError
+from infra_libs.ts_mon.common.errors import MonitoringInvalidFieldTypeError
+from infra_libs.ts_mon.common.errors import MonitoringInvalidValueTypeError
+from infra_libs.ts_mon.common.errors import MonitoringTooManyFieldsError
+from infra_libs.ts_mon.common.errors import MonitoringNoConfiguredMonitorError
+from infra_libs.ts_mon.common.errors import MonitoringNoConfiguredTargetError
+
+from infra_libs.ts_mon.common.helpers import ScopedIncrementCounter
+from infra_libs.ts_mon.common.helpers import ScopedMeasureTime
+
+from infra_libs.ts_mon.common.interface import close
+from infra_libs.ts_mon.common.interface import flush
+from infra_libs.ts_mon.common.interface import register_global_metrics
+from infra_libs.ts_mon.common.interface import register_global_metrics_callback
+from infra_libs.ts_mon.common.interface import reset_for_unittest
+
+from infra_libs.ts_mon.common.metrics import BooleanField
+from infra_libs.ts_mon.common.metrics import IntegerField
+from infra_libs.ts_mon.common.metrics import StringField
+
+from infra_libs.ts_mon.common.metrics import BooleanMetric
+from infra_libs.ts_mon.common.metrics import CounterMetric
+from infra_libs.ts_mon.common.metrics import CumulativeDistributionMetric
+from infra_libs.ts_mon.common.metrics import CumulativeMetric
+from infra_libs.ts_mon.common.metrics import FloatMetric
+from infra_libs.ts_mon.common.metrics import GaugeMetric
+from infra_libs.ts_mon.common.metrics import MetricsDataUnits
+from infra_libs.ts_mon.common.metrics import NonCumulativeDistributionMetric
+from infra_libs.ts_mon.common.metrics import StringMetric
+
+from infra_libs.ts_mon.common.targets import TaskTarget
+from infra_libs.ts_mon.common.targets import DeviceTarget
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/__init__.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/__init__.py
new file mode 100644
index 0000000..50b23df
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/distribution.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/distribution.py
new file mode 100644
index 0000000..99b0fb1
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/distribution.py
@@ -0,0 +1,139 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import bisect
+import collections
+
+
+class _Bucketer(object):
+  """Bucketing function for histograms recorded by the Distribution class."""
+
+  def __init__(self, width, growth_factor, num_finite_buckets, scale=1.0):
+    """The bucket sizes are controlled by width and growth_factor, and the total
+    number of buckets is set by num_finite_buckets:
+
+    Args:
+      width: fixed size of each bucket (ignores |scale|).
+      growth_factor: if non-zero, each successive bucket is larger than the
+          previous one by this multiplicative factor (see lower bound formula below).
+      num_finite_buckets: the number of finite buckets.  There are two
+          additional buckets - an underflow bucket whose lower bound is -Infinity
+          and an overflow bucket whose upper bound is +Infinity.
+      scale: overall scale factor to apply to buckets, if using geometric
+          buckets.
+
+    Specify a width for fixed-size buckets or specify a growth_factor for bucket
+    sizes that follow a geometric progression.  Specifying both is not valid.
+
+    For fixed-size buckets::
+
+      The i'th bucket covers the interval [(i-1) * width, i * width),  where i
+      ranges from 1 to num_finite_buckets, inclusive:
+
+      bucket number                   lower bound      upper bound
+      i == 0 (underflow)              -inf             0
+      1 <= i <= num_buckets           (i-1) * width    i * width
+      i == num_buckets+1 (overflow)   (i-1) * width    +inf
+
+    For geometric buckets::
+
+      The i'th bucket covers the interval [factor^(i-1), factor^i) * scale
+      where i ranges from 1 to num_finite_buckets inclusive.
+
+      bucket number                   lower bound            upper bound
+      i == 0 (underflow)              -inf                   scale
+      1 <= i <= num_buckets           factor^(i-1) * scale   factor^i * scale
+      i == num_buckets+1 (overflow)   factor^(i-1) * scale   +inf
+    """
+
+    if num_finite_buckets < 0:
+      raise ValueError('num_finite_buckets must be >= 0 (was %d)' %
+          num_finite_buckets)
+    if width != 0 and growth_factor != 0:
+      raise ValueError('a Bucketer must be created with either a width or a '
+                       'growth factor, not both')
+
+    self.width = width
+    self.growth_factor = growth_factor
+    self.num_finite_buckets = num_finite_buckets
+    self.total_buckets = num_finite_buckets + 2
+    self.underflow_bucket = 0
+    self.overflow_bucket = self.total_buckets - 1
+    self.scale = scale
+
+    if width != 0:
+      self._lower_bounds = [float('-Inf')] + self._linear_bounds()
+    else:
+      self._lower_bounds = [float('-Inf')] + self._exponential_bounds()
+
+    # Sanity check the bucket lower bounds we created.
+    assert len(self._lower_bounds) == self.total_buckets
+    assert all(x < y for x, y in zip(
+        self._lower_bounds, self._lower_bounds[1:])), (
+        'bucket boundaries must be monotonically increasing')
+
+  def __eq__(self, other):
+    return (type(self) is type(other) and
+            self.width == other.width and
+            self.growth_factor == other.growth_factor and
+            self.num_finite_buckets == other.num_finite_buckets and
+            self.scale == other.scale)
+
+  def _linear_bounds(self):
+    return [self.width * i for i in range(self.num_finite_buckets + 1)]
+
+  def _exponential_bounds(self):
+    return [
+        self.scale * self.growth_factor ** i
+        for i in range(self.num_finite_buckets + 1)]
+
+  def bucket_for_value(self, value):
+    """Returns the index of the bucket that this value belongs to."""
+
+    # bisect.bisect_left is wrong because the buckets are of [lower, upper) form
+    return bisect.bisect(self._lower_bounds, value) - 1
+
+  def bucket_boundaries(self, bucket):
+    """Returns a tuple that is the [lower, upper) bounds of this bucket.
+
+    The lower bound of the first bucket is -Infinity, and the upper bound of the
+    last bucket is +Infinity.
+    """
+
+    if bucket < 0 or bucket >= self.total_buckets:
+      raise IndexError('bucket %d out of range' % bucket)
+    if bucket == self.total_buckets - 1:
+      return (self._lower_bounds[bucket], float('Inf'))
+    return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])
+
+
+def FixedWidthBucketer(width, num_finite_buckets=100):
+  """Convenience function that returns a fixed width Bucketer."""
+  return _Bucketer(width=width, growth_factor=0.0,
+      num_finite_buckets=num_finite_buckets)
+
+
+def GeometricBucketer(growth_factor=10**0.2, num_finite_buckets=100,
+                      scale=1.0):
+  """Convenience function that returns a geometric progression Bucketer."""
+  return _Bucketer(width=0, growth_factor=growth_factor,
+      num_finite_buckets=num_finite_buckets, scale=scale)
+
+
+class Distribution(object):
+  """Holds a histogram distribution.
+
+  Buckets are chosen for values by the provided Bucketer.
+  """
+
+  def __init__(self, bucketer):
+    self.bucketer = bucketer
+    self.sum = 0
+    self.count = 0
+    self.buckets = collections.defaultdict(int)
+
+  def add(self, value):
+    self.buckets[self.bucketer.bucket_for_value(value)] += 1
+    self.sum += value
+    self.count += 1
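To make the bucketing behaviour concrete, here is a small sketch (assuming
infra_libs is importable; values and bucket counts are arbitrary):

  from infra_libs.ts_mon.common.distribution import (
      Distribution, FixedWidthBucketer, GeometricBucketer)

  dist = Distribution(FixedWidthBucketer(width=10, num_finite_buckets=10))
  for value in (3, 12, 12, 250):
    dist.add(value)

  assert dist.count == 4 and dist.sum == 277
  assert dist.buckets[1] == 1   # 3 falls in [0, 10)
  assert dist.buckets[2] == 2   # both 12s fall in [10, 20)
  assert dist.buckets[11] == 1  # 250 lands in the overflow bucket

  geo = GeometricBucketer(growth_factor=10**0.2)
  print(geo.bucket_boundaries(1))  # (1.0, ~1.585): buckets grow geometrically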
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/errors.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/errors.py
new file mode 100644
index 0000000..251bcfd
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/errors.py
@@ -0,0 +1,134 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Classes representing errors that can be raised by the monitoring library."""
+
+
+class MonitoringError(Exception):
+  """Base class for exceptions raised by this module."""
+
+
+class MonitoringDecreasingValueError(MonitoringError):
+  """Raised when setting a metric value that should increase but doesn't."""
+
+  def __init__(self, metric, old_value, new_value):
+    self.metric = metric
+    self.old_value = old_value
+    self.new_value = new_value
+
+  def __str__(self):
+    return ('Monotonically increasing metric "%s" was given value "%s", which '
+            'is not greater than or equal to "%s".' % (
+                self.metric, self.new_value, self.old_value))
+
+
+class MonitoringDuplicateRegistrationError(MonitoringError):
+  """Raised when trying to register a metric with the same name as another."""
+
+  def __init__(self, metric):
+    self.metric = metric
+
+  def __str__(self):
+    return 'Different metrics with the same name "%s" were both registered.' % (
+        self.metric)
+
+
+class MonitoringIncrementUnsetValueError(MonitoringError):
+  """Raised when trying to increment a metric which hasn't been set."""
+
+  def __init__(self, metric):
+    self.metric = metric
+
+  def __str__(self):
+    return 'Metric "%s" was incremented without first setting a value.' % (
+        self.metric)
+
+
+class MonitoringInvalidValueTypeError(MonitoringError):
+  """Raised when sending a metric value is not a valid type."""
+
+  def __init__(self, metric, value):
+    self.metric = metric
+    self.value = value
+
+  def __str__(self):
+    return 'Metric "%s" was given invalid value "%s" (%s).' % (
+        self.metric, self.value, type(self.value))
+
+
+class MonitoringInvalidFieldTypeError(MonitoringError):
+  """Raised when sending a metric with a field value of an invalid type."""
+
+  def __init__(self, metric, field, value):
+    self.metric = metric
+    self.field = field
+    self.value = value
+
+  def __str__(self):
+    return 'Metric "%s" was given field "%s" with invalid value "%s" (%s).' % (
+        self.metric, self.field, self.value, type(self.value))
+
+
+class MonitoringTooManyFieldsError(MonitoringError):
+  """Raised when sending a metric with more than 7 fields."""
+
+  def __init__(self, metric, fields):
+    self.metric = metric
+    self.fields = fields
+
+  def __str__(self):
+    return 'Metric "%s" was given too many (%d > 7) fields: %s.' % (
+        self.metric, len(self.fields), self.fields)
+
+
+class MonitoringNoConfiguredMonitorError(MonitoringError):
+  """Raised when sending a metric without configuring the global Monitor."""
+
+  def __init__(self, metric):
+    self.metric = metric
+
+  def __str__(self):
+    if self.metric is not None:
+      return 'Metric "%s" was sent before initializing the global Monitor.' % (
+          self.metric)
+    else:
+      return 'Metrics were sent before initializing the global Monitor.'
+
+
+class MonitoringNoConfiguredTargetError(MonitoringError):
+  """Raised when sending a metric with no global nor local Target."""
+
+  def __init__(self, metric):
+    self.metric = metric
+
+  def __str__(self):
+    return 'Metric "%s" was sent with no Target configured.' % (self.metric)
+
+
+class MonitoringFailedToFlushAllMetricsError(MonitoringError):
+  """Raised when some error is encountered in flushing specific metrics."""
+
+  def __init__(self, error_count):
+    self.error_count = error_count
+
+  def __str__(self):
+    return ('Failed to flush %d metrics. See tracebacks above' %
+            (self.error_count))
+
+
+class MetricDefinitionError(MonitoringError):
+  """Raised when a metric was defined incorrectly."""
+
+
+class WrongFieldsError(MonitoringError):
+  """Raised when a metric is given different fields to its definition."""
+
+  def __init__(self, metric_name, got, expected):
+    self.metric_name = metric_name
+    self.got = got
+    self.expected = expected
+
+  def __str__(self):
+    return 'Metric "%s" is defined with %s fields but was given %s' % (
+        self.metric_name, self.expected, self.got)
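A brief sketch of the intended handling (not part of the file): every
exception above derives from MonitoringError, so callers that report metrics
opportunistically can catch the base class. This assumes the ts_mon package
and its protobuf dependencies are importable; flushing before a global
Monitor/Target is configured raises MonitoringNoConfiguredMonitorError:

  from infra_libs.ts_mon.common import errors, interface

  try:
    interface.flush()
  except errors.MonitoringError:
    # e.g. MonitoringNoConfiguredMonitorError when ts_mon was never set up.
    pass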
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/helpers.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/helpers.py
new file mode 100644
index 0000000..e54ccd9
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/helpers.py
@@ -0,0 +1,155 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helper classes that make it easier to instrument code for monitoring."""
+
+
+from infra_libs.ts_mon.common import metrics
+
+import time
+
+
+class ScopedIncrementCounter(object):
+  """Increment a counter when the wrapped code exits.
+
+  The counter will be given a 'status' label whose value will be set to
+  'success' or 'failure' depending on whether the wrapped code threw an exception.
+
+  Example:
+
+    mycounter = Counter('foo/stuff_done')
+    with ScopedIncrementCounter(mycounter):
+      DoStuff()
+
+  To set a custom status label and status value:
+
+    mycounter = Counter('foo/http_requests')
+    with ScopedIncrementCounter(mycounter, 'response_code') as sc:
+      status = MakeHttpRequest()
+      sc.set_status(status)  # This custom status now won't be overwritten if
+                             # the code later raises an exception.
+  """
+
+  def __init__(self, counter, label='status', success_value='success',
+               failure_value='failure'):
+    self.counter = counter
+    self.label = label
+    self.success_value = success_value
+    self.failure_value = failure_value
+    self.status = None
+
+  def set_failure(self):
+    self.set_status(self.failure_value)
+
+  def set_status(self, status):
+    self.status = status
+
+  def __enter__(self):
+    self.status = None
+    return self
+
+  def __exit__(self, exc_type, exc_value, traceback):
+    if self.status is None:
+      if exc_type is None:
+        self.status = self.success_value
+      else:
+        self.status = self.failure_value
+    self.counter.increment({self.label: self.status})
+
+
+class ScopedMeasureTime(object):
+  """Report durations metric with status when the wrapped code exits.
+
+  The metric must be CumulativeDistributionMetric with a field to set status.
+  The status field will be set to 'success' or 'failure' depending on whether
+  the wrapped code threw an exception. The status field values can be customized
+  with constructor kwargs or by calling `set_status`.
+
+  A new instance of this class should be constructed each time it is used.
+
+  Example:
+
+    mymetric = CumulativeDistributionMetric(
+      'xxx/durations', 'duration of xxx op',
+      [StringField('status')],
+      bucketer=ts_mon.GeometricBucketer(10**0.04),
+      units=ts_mon.MetricsDataUnits.SECONDS)
+    with ScopedMeasureTime(mymetric):
+      DoStuff()
+
+  To set a custom label and status value:
+
+    mymetric = CumulativeDistributionMetric(
+      'xxx/durations', 'duration of xxx op',
+      [IntegerField('response_code')],
+      bucketer=ts_mon.GeometricBucketer(10**0.04),
+      units=ts_mon.MetricsDataUnits.MILLISECONDS)
+    with ScopedMeasureTime(mymetric, field='response_code') as sd:
+      sd.set_status(404)  # This custom status now won't be overwritten
+                          # even if exception is raised later.
+
+  To annotate the duration with some other fields, use extra_fields_values:
+
+    mymetric = CumulativeDistributionMetric(
+      'xxx/durations', 'duration of xxx op',
+      [StringField('status'),
+       StringField('type')],
+      bucketer=ts_mon.GeometricBucketer(10**0.04),
+      units=ts_mon.MetricsDataUnits.SECONDS)
+    with ScopedMeasureTime(mymetric, extra_fields_values={'type': 'normal'}):
+      DoStuff()
+  """
+
+  _UNITS_PER_SECOND = {
+      metrics.MetricsDataUnits.SECONDS: 1e0,
+      metrics.MetricsDataUnits.MILLISECONDS: 1e3,
+      metrics.MetricsDataUnits.MICROSECONDS: 1e6,
+      metrics.MetricsDataUnits.NANOSECONDS: 1e9,
+  }
+
+  def __init__(self, metric, field='status', success_value='success',
+               failure_value='failure', extra_fields_values=(),
+               time_fn=time.time):
+    assert isinstance(metric, metrics.CumulativeDistributionMetric)
+    assert sum(1 for spec in metric.field_spec if spec.name == field) == 1, (
+        'typo in field name `%s`?' % field)
+    assert metric.units in self._UNITS_PER_SECOND, (
+        'metric\'s units (%s) is not one of %s' %
+        (metric.units, self._UNITS_PER_SECOND.keys()))
+
+    self._metric = metric
+    self._field_values = dict(extra_fields_values)
+    assert field not in self._field_values
+    self._field_values[field] = None
+    self._field = field
+    self._units_per_second = self._UNITS_PER_SECOND[metric.units]
+    self._success_value = success_value
+    self._failure_value = failure_value
+    self._start_timestamp = None
+    self._time_fn = time_fn
+
+  def set_status(self, status):
+    assert self._start_timestamp is not None, (
+        'set_status must be called only inside with statement')
+    self._field_values[self._field] = status
+
+  def set_failure(self):
+    return self.set_status(self._failure_value)
+
+  def __enter__(self):
+    assert self._start_timestamp is None, ('re-use of ScopedMeasureTime '
+                                           'instances detected')
+    self._start_timestamp = self._time_fn()
+    return self
+
+  def __exit__(self, exc_type, exc_value, traceback):
+    elapsed_seconds = self._time_fn() - self._start_timestamp
+    if self._field_values[self._field] is None:
+      if exc_type is None:
+        self._field_values[self._field] = self._success_value
+      else:
+        self._field_values[self._field] = self._failure_value
+
+    self._metric.add(elapsed_seconds * self._units_per_second,
+                     self._field_values)
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/http_metrics.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/http_metrics.py
new file mode 100644
index 0000000..9c806f5
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/http_metrics.py
@@ -0,0 +1,102 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from infra_libs.ts_mon.common import distribution
+from infra_libs.ts_mon.common import metrics
+
+
+# Extending HTTP status codes to client-side errors and timeouts.
+STATUS_OK = 200
+STATUS_ERROR = 901
+STATUS_TIMEOUT = 902
+STATUS_EXCEPTION = 909
+
+
+# 90% of durations are in the range 11-1873ms.  Growth factor 10^0.06 puts that
+# range into 37 buckets.  Max finite bucket value is 12 minutes.
+_duration_bucketer = distribution.GeometricBucketer(10**0.06)
+
+# 90% of sizes are in the range 0.17-217014 bytes.  Growth factor 10^0.1 puts
+# that range into 54 buckets.  Max finite bucket value is 6.3GB.
+_size_bucketer = distribution.GeometricBucketer(10**0.1)
+
+
+request_bytes = metrics.CumulativeDistributionMetric('http/request_bytes',
+    'Bytes sent per http request (body only).', [
+        metrics.StringField('name'),
+        metrics.StringField('client'),
+    ],
+    bucketer=_size_bucketer)
+response_bytes = metrics.CumulativeDistributionMetric('http/response_bytes',
+    'Bytes received per http request (content only).', [
+        metrics.StringField('name'),
+        metrics.StringField('client'),
+    ],
+    bucketer=_size_bucketer)
+durations = metrics.CumulativeDistributionMetric('http/durations',
+    'Time elapsed between sending a request and getting a'
+    ' response (including parsing) in milliseconds.', [
+        metrics.StringField('name'),
+        metrics.StringField('client'),
+    ],
+    bucketer=_duration_bucketer)
+response_status = metrics.CounterMetric('http/response_status',
+    'Number of responses received by HTTP status code.', [
+        metrics.IntegerField('status'),
+        metrics.StringField('name'),
+        metrics.StringField('client'),
+    ])
+
+
+server_request_bytes = metrics.CumulativeDistributionMetric(
+    'http/server_request_bytes',
+    'Bytes received per http request (body only).', [
+        metrics.IntegerField('status'),
+        metrics.StringField('name'),
+        metrics.BooleanField('is_robot'),
+    ],
+    bucketer=_size_bucketer)
+server_response_bytes = metrics.CumulativeDistributionMetric(
+    'http/server_response_bytes',
+    'Bytes sent per http request (content only).', [
+        metrics.IntegerField('status'),
+        metrics.StringField('name'),
+        metrics.BooleanField('is_robot'),
+    ],
+    bucketer=_size_bucketer)
+server_durations = metrics.CumulativeDistributionMetric('http/server_durations',
+    'Time elapsed between receiving a request and sending a'
+    ' response (including parsing) in milliseconds.', [
+        metrics.IntegerField('status'),
+        metrics.StringField('name'),
+        metrics.BooleanField('is_robot'),
+    ],
+    bucketer=_duration_bucketer)
+server_response_status = metrics.CounterMetric('http/server_response_status',
+    'Number of responses sent by HTTP status code.', [
+        metrics.IntegerField('status'),
+        metrics.StringField('name'),
+        metrics.BooleanField('is_robot'),
+    ])
+
+
+def update_http_server_metrics(endpoint_name, response_status_code, elapsed_ms,
+                               request_size=None, response_size=None,
+                               user_agent=None):
+  fields = {'status': response_status_code, 'name': endpoint_name,
+            'is_robot': False}
+  if user_agent is not None:
+    # We must not log user agents, but we can store whether or not the
+    # user agent string indicates that the requester was a Google bot.
+    fields['is_robot'] = (
+        'GoogleBot' in user_agent or
+        'GoogleSecurityScanner' in user_agent or
+        user_agent == 'B3M/prober')
+
+  server_durations.add(elapsed_ms, fields=fields)
+  server_response_status.increment(fields=fields)
+  if request_size is not None:
+    server_request_bytes.add(request_size, fields=fields)
+  if response_size is not None:
+    server_response_bytes.add(response_size, fields=fields)
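A hedged example of calling the server-side entry point above; the endpoint
name and numbers are invented, and ts_mon is assumed to have been initialised
already (e.g. via process_argparse_options) so the recorded values have
somewhere to go at flush time:

  from infra_libs.ts_mon.common import http_metrics

  http_metrics.update_http_server_metrics(
      '/api/status',        # endpoint_name
      200,                  # response_status_code
      37.5,                 # elapsed_ms
      request_size=512,
      response_size=2048,
      user_agent='Mozilla/5.0')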
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/interface.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/interface.py
new file mode 100644
index 0000000..390c942
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/interface.py
@@ -0,0 +1,298 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Classes representing the monitoring interface for tasks or devices.
+
+Usage:
+  import argparse
+  from infra_libs import ts_mon
+
+  p = argparse.ArgumentParser()
+  ts_mon.add_argparse_options(p)
+  args = p.parse_args()  # Must contain info for Monitor (and optionally Target)
+  ts_mon.process_argparse_options(args)
+
+  # Will use the default Target set up via command line args:
+  m = ts_mon.BooleanMetric('/my/metric/name', fields={'foo': 1, 'bar': 'baz'})
+  m.set(True)
+
+  # Use a custom Target:
+  t = ts_mon.TaskTarget('service', 'job', 'region', 'host')  # or DeviceTarget
+  m2 = ts_mon.GaugeMetric('/my/metric/name2', fields={'asdf': 'qwer'}, target=t)
+  m2.set(5)
+
+Library usage:
+  from infra_libs.ts_mon import CounterMetric
+  # No need to set up Monitor or Target, assume calling code did that.
+  c = CounterMetric('/my/counter', fields={'source': 'mylibrary'})
+  c.set(0)
+  for x in range(100):
+    c.increment()
+"""
+
+import datetime
+import logging
+import random
+import threading
+import time
+
+from infra_libs.ts_mon.common import errors
+from infra_libs.ts_mon.common import metric_store
+from infra_libs.ts_mon.protos import metrics_pb2
+
+# The maximum number of MetricsData messages to include in each HTTP request.
+# MetricsCollections larger than this will be split into multiple requests.
+METRICS_DATA_LENGTH_LIMIT = 500
+
+
+class State(object):
+  """Package-level state is stored here so that it is easily accessible.
+
+  Configuration is kept in this one object at the global level so that all
+  libraries in use by the same tool or service can all take advantage of the
+  same configuration.
+  """
+
+  def __init__(self, store_ctor=None, target=None):
+    """Optional arguments are for unit tests."""
+    if store_ctor is None:  # pragma: no branch
+      store_ctor = metric_store.InProcessMetricStore
+    # The Monitor object that will be used to send all metrics.
+    self.global_monitor = None
+    # The Target object that will be paired with all metrics that don't supply
+    # their own.
+    self.target = target
+    # The flush mode being used to control when metrics are pushed.
+    self.flush_mode = None
+    # A predicate to determine if metrics should be sent.
+    self.flush_enabled_fn = lambda: True
+    # The background thread that flushes metrics every
+    # --ts-mon-flush-interval-secs seconds.  May be None if
+    # --ts-mon-flush != 'auto' or --ts-mon-flush-interval-secs == 0.
+    self.flush_thread = None
+    # All metrics created by this application.
+    self.metrics = {}
+    # The MetricStore object that holds the actual metric values.
+    self.store = store_ctor(self)
+    # Cached time of the last flush. Useful mostly in AppEngine apps.
+    self.last_flushed = datetime.datetime.utcfromtimestamp(0)
+    # Metric name prefix
+    self.metric_name_prefix = '/chrome/infra/'
+    # Metrics registered with register_global_metrics.  Keyed by metric name.
+    self.global_metrics = {}
+    # Callbacks registered with register_global_metrics_callback.  Keyed by the
+    # arbitrary string provided by the user.  Called before each flush.
+    self.global_metrics_callbacks = {}
+    # Whether to call invoke_global_callbacks() on every flush().  Set to False
+    # on Appengine because it does its own thing.
+    self.invoke_global_callbacks_on_flush = True
+
+  def reset_for_unittest(self):
+    self.metrics = {}
+    self.global_metrics = {}
+    self.global_metrics_callbacks = {}
+    self.invoke_global_callbacks_on_flush = True
+    self.last_flushed = datetime.datetime.utcfromtimestamp(0)
+    self.store.reset_for_unittest()
+
+state = State()
+
+
+def flush():
+  """Send all metrics that are registered in the application."""
+  if not state.flush_enabled_fn():
+    logging.debug('ts_mon: sending metrics is disabled.')
+    return
+
+  if not state.global_monitor or not state.target:
+    raise errors.MonitoringNoConfiguredMonitorError(None)
+
+  if state.invoke_global_callbacks_on_flush:
+    invoke_global_callbacks()
+
+  rpcs = []
+  for proto in _generate_proto():
+    rpcs.append(state.global_monitor.send(proto))
+  for rpc in rpcs:
+    if rpc is not None:
+      state.global_monitor.wait(rpc)
+  state.last_flushed = datetime.datetime.utcnow()
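+
+# A usage sketch (illustrative, assuming the ts_mon package re-exports flush()
+# the same way it re-exports the metric classes): with --ts-mon-flush=manual
+# the caller pushes metrics explicitly.
+#
+#   ts_mon.process_argparse_options(args)  # sets up the monitor and target
+#   my_counter.increment()
+#   ts_mon.flush()                         # sends all registered metrics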
+
+
+def _generate_proto():
+  """Generate MetricsPayload for global_monitor.send()."""
+  proto = metrics_pb2.MetricsPayload()
+
+  # Key: Target, value: MetricsCollection.
+  collections = {}
+
+  # Key: (Target, metric name) tuple, value: MetricsDataSet.
+  data_sets = {}
+
+  count = 0
+  for (target, metric, start_time, end_time, fields_values
+       ) in state.store.get_all():
+    for fields, value in fields_values.items():
+      if count >= METRICS_DATA_LENGTH_LIMIT:
+        yield proto
+        proto = metrics_pb2.MetricsPayload()
+        collections.clear()
+        data_sets.clear()
+        count = 0
+
+      if target not in collections:
+        collections[target] = proto.metrics_collection.add()
+        target.populate_target_pb(collections[target])
+      collection = collections[target]
+
+      key = (target, metric.name)
+      new_data_set = None
+      if key not in data_sets:
+        new_data_set = metrics_pb2.MetricsDataSet()
+        metric.populate_data_set(new_data_set)
+
+      data = metrics_pb2.MetricsData()
+      metric.populate_data(data, start_time, end_time, fields, value)
+
+      # All required data protos have been successfully populated. Now we can
+      # insert them into the serialized proto and bookkeeping structures.
+      if new_data_set is not None:
+        collection.metrics_data_set.add().CopyFrom(new_data_set)
+        data_sets[key] = collection.metrics_data_set[-1]
+      data_sets[key].data.add().CopyFrom(data)
+      count += 1
+
+  if count > 0:
+    yield proto
+
+
+def register(metric):
+  """Adds the metric to the list of metrics sent by flush().
+
+  This is called automatically by Metric's constructor.
+  """
+  # If someone is registering the same metric object twice, that's okay, but
+  # registering two different metric objects with the same metric name is not.
+  for m in state.metrics.values():
+    if metric == m:
+      state.metrics[metric.name] = metric
+      return
+  if metric.name in state.metrics:
+    raise errors.MonitoringDuplicateRegistrationError(metric.name)
+
+  state.metrics[metric.name] = metric
+
+
+def unregister(metric):
+  """Removes the metric from the list of metrics sent by flush()."""
+  del state.metrics[metric.name]
+
+
+def close():
+  """Stops any background threads and waits for them to exit."""
+  if state.flush_thread is not None:
+    state.flush_thread.stop()
+
+
+def reset_for_unittest(disable=False):
+  state.reset_for_unittest()
+  state.flush_enabled_fn = lambda: not disable
+
+
+def register_global_metrics(metrics):
+  """Declare metrics as global.
+
+  Outside Appengine this has no effect.
+
+  On Appengine, registering a metric as "global" simply means it will be reset
+  every time the metric is sent. This allows any instance to send such a metric
+  to a shared stream, e.g. by overriding target fields like task_num (instance
+  ID), host_name (version) or job_name (module name).
+
+  There is no "unregister". Multiple calls add up. It only needs to be called
+  once, similar to gae_ts_mon.initialize().
+
+  Args:
+    metrics (iterable): a collection of Metric objects.
+  """
+  state.global_metrics.update({m.name: m for m in metrics})
+
+
+def register_global_metrics_callback(name, callback):
+  """Register a named function to compute global metrics values.
+
+  There can only be one callback for a given name. Setting another callback with
+  the same name will override the previous one. To disable a callback, set its
+  function to None.
+
+  Args:
+    name (string): name of the callback.
+    callback (function): this function will be called without arguments every
+      minute.  On Appengine it is called once for the whole application from the
+      gae_ts_mon cron job. It is intended to set the values of the global
+      metrics.
+  """
+  if not callback:
+    if name in state.global_metrics_callbacks:
+      del state.global_metrics_callbacks[name]
+  else:
+    state.global_metrics_callbacks[name] = callback
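+
+
+# A sketch of the intended pattern (names illustrative; get_queue_length() is a
+# placeholder for application code):
+#
+#   queue_len = GaugeMetric('myapp/queue_length', 'Jobs waiting.', None)
+#   register_global_metrics([queue_len])
+#   register_global_metrics_callback(
+#       'myapp', lambda: queue_len.set(get_queue_length()))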
+
+
+def invoke_global_callbacks():
+  for name, callback in state.global_metrics_callbacks.items():
+    logging.debug('Invoking callback %s', name)
+    try:
+      callback()
+    except Exception:
+      logging.exception('Monitoring global callback "%s" failed', name)
+
+
+class _FlushThread(threading.Thread):
+  """Background thread that flushes metrics on an interval."""
+
+  def __init__(self, interval_secs, stop_event=None):
+    super(_FlushThread, self).__init__(name='ts_mon')
+
+    if stop_event is None:
+      stop_event = threading.Event()
+
+    self.daemon = True
+    self.interval_secs = interval_secs
+    self.stop_event = stop_event
+
+  def _flush_and_log_exceptions(self):
+    try:
+      flush()
+    except Exception:
+      logging.exception('Automatic monitoring flush failed.')
+
+  def run(self):
+    # Jitter the first interval so tasks started at the same time (say, by cron)
+    # on different machines don't all send metrics simultaneously.
+    next_timeout = random.uniform(self.interval_secs / 2.0, self.interval_secs)
+
+    while True:
+      if self.stop_event.wait(next_timeout):
+        return
+
+      # Try to flush every N seconds exactly so rate calculations are more
+      # consistent.
+      start = time.time()
+      self._flush_and_log_exceptions()
+      flush_duration = time.time() - start
+      next_timeout = self.interval_secs - flush_duration
+
+      if next_timeout < 0:
+        logging.warning(
+            'Last monitoring flush took %f seconds (longer than '
+            '--ts-mon-flush-interval-secs = %f seconds)',
+            flush_duration, self.interval_secs)
+        next_timeout = 0
+
+  def stop(self):
+    """Stops the background thread and performs a final flush."""
+
+    self.stop_event.set()
+    self.join()
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/metric_store.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/metric_store.py
new file mode 100644
index 0000000..b08b649
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/metric_store.py
@@ -0,0 +1,232 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import copy
+import itertools
+import threading
+import time
+
+from infra_libs.ts_mon.common import errors
+
+
+def default_modify_fn(name):
+  def _modify_fn(value, delta):
+    if delta < 0:
+      raise errors.MonitoringDecreasingValueError(name, None, delta)
+    return value + delta
+  return _modify_fn
+
+
+class MetricStore(object):
+  """A place to store values for each metric.
+
+  Several methods take "a normalized field tuple".  This is a tuple of
+  (key, value) tuples sorted by key.  (It is given as a tuple rather than a
+  dict because tuples are hashable and can be used as dict keys; dicts
+  cannot.)
+
+  The MetricStore is also responsible for keeping the start_time of each metric.
+  This is what goes into the start_timestamp_us field in the MetricsData proto
+  for cumulative metrics and distributions, and helps Monarch identify when a
+  counter was reset.  This is the MetricStore's job because an implementation
+  might share counter values across multiple instances of a task (like on
+  Appengine), so the start time must be associated with that value so that it
+  can be reset for all tasks at once when the value is reset.
+
+  External metric stores (like those backed by memcache) may be cleared (either
+  wholly or partially) at any time.  When this happens the MetricStore *must*
+  generate a new start_time for all the affected metrics.
+
+  Metrics can specify their own explicit start time if they are mirroring the
+  value of some external counter that started counting at a known time.
+
+  Otherwise the MetricStore's time_fn (defaults to time.time()) is called the
+  first time a metric is set or incremented, or after it is cleared externally.
+  """
+
+  def __init__(self, state, time_fn=None):
+    self._state = state
+    self._time_fn = time_fn or time.time
+
+  def get(self, name, fields, target_fields, default=None):
+    """Fetches the current value for the metric.
+
+    Args:
+      name (string): the metric's name.
+      fields (tuple): a normalized field tuple.
+      target_fields (dict or None): target fields to override.
+      default: the value to return if the metric has no value of this set of
+          field values.
+    """
+    raise NotImplementedError
+
+  def get_all(self):
+    """Returns an iterator over all the metrics present in the store.
+
+    The iterator yields 5-tuples:
+      (target, metric, start_time, end_time, field_values)
+    """
+    raise NotImplementedError
+
+  def set(self, name, fields, target_fields, value, enforce_ge=False):
+    """Sets the metric's value.
+
+    Args:
+      name: the metric's name.
+      fields: a normalized field tuple.
+      target_fields (dict or None): target fields to override.
+      value: the new value for the metric.
+      enforce_ge: if this is True, raise an exception if the new value is
+          less than the old value.
+
+    Raises:
+      MonitoringDecreasingValueError: if enforce_ge is True and the new value is
+          smaller than the old value.
+    """
+    raise NotImplementedError
+
+  def incr(self, name, fields, target_fields, delta, modify_fn=None):
+    """Increments the metric's value.
+
+    Args:
+      name: the metric's name.
+      fields: a normalized field tuple.
+      target_fields (dict or None): target fields to override.
+      delta: how much to increment the value by.
+      modify_fn: this function is called with the original value and the delta
+          as its arguments and is expected to return the new value.  The
+          function must be idempotent as it may be called multiple times.
+    """
+    raise NotImplementedError
+
+  def reset_for_unittest(self, name=None):
+    """Clears the values metrics.  Useful in unittests.
+
+    Args:
+      name: the name of an individual metric to reset, or if None resets all
+        metrics.
+    """
+    raise NotImplementedError
+
+  def _start_time(self, name):
+    if name in self._state.metrics:
+      ret = self._state.metrics[name].start_time
+      if ret is not None:
+        return ret
+
+    return self._time_fn()
+
+
+class _TargetFieldsValues(object):
+  """Holds all values for a single metric.
+
+  Values are keyed by metric fields and target fields (which override the
+  default target fields configured globally for the process).
+  """
+
+  def __init__(self, start_time):
+    self.start_time = start_time
+
+    # {normalized_target_fields: {normalized_metric_fields: value}}
+    self._values = collections.defaultdict(dict)
+
+  def _get_target_values(self, target_fields):
+    # Normalize the target fields by converting them into a hashable tuple.
+    if not target_fields:
+      target_fields = {}
+    key = tuple(sorted(target_fields.items()))
+
+    return self._values[key]
+
+  def get_value(self, fields, target_fields, default=None):
+    return self._get_target_values(target_fields).get(
+        fields, default)
+
+  def set_value(self, fields, target_fields, value):
+    self._get_target_values(target_fields)[fields] = value
+
+  def iter_targets(self, default_target):
+    for target_fields, fields_values in self._values.items():
+      if target_fields:
+        target = copy.copy(default_target)
+        target.update({k: v for k, v in target_fields})
+      else:
+        target = default_target
+      yield target, fields_values
+
+  def __deepcopy__(self, memo_dict):
+    ret = _TargetFieldsValues(self.start_time)
+    ret._values = copy.deepcopy(self._values, memo_dict)
+    return ret
+
+
+class InProcessMetricStore(MetricStore):
+  """A thread-safe metric store that keeps values in memory."""
+
+  def __init__(self, state, time_fn=None):
+    super(InProcessMetricStore, self).__init__(state, time_fn=time_fn)
+
+    self._values = {}
+    self._thread_lock = threading.Lock()
+
+  def _entry(self, name):
+    if name not in self._values:
+      self._reset(name)
+
+    return self._values[name]
+
+  def get(self, name, fields, target_fields, default=None):
+    return self._entry(name).get_value(fields, target_fields, default)
+
+  def iter_field_values(self, name):
+    return itertools.chain.from_iterable(
+        x.items() for _, x
+        in self._entry(name).iter_targets(self._state.target))
+
+  def get_all(self):
+    # Make a copy of the metric values in case another thread (or this
+    # generator's consumer) modifies them while we're iterating.
+    with self._thread_lock:
+      values = copy.deepcopy(self._values)
+    end_time = self._time_fn()
+
+    for name, metric_values in values.items():
+      if name not in self._state.metrics:
+        continue
+      start_time = metric_values.start_time
+      for target, fields_values in metric_values.iter_targets(
+          self._state.target):
+        yield (target, self._state.metrics[name], start_time, end_time,
+               fields_values)
+
+  def set(self, name, fields, target_fields, value, enforce_ge=False):
+    with self._thread_lock:
+      if enforce_ge:
+        old_value = self._entry(name).get_value(fields, target_fields, 0)
+        if value < old_value:
+          raise errors.MonitoringDecreasingValueError(name, old_value, value)
+
+      self._entry(name).set_value(fields, target_fields, value)
+
+  def incr(self, name, fields, target_fields, delta, modify_fn=None):
+    if delta < 0:
+      raise errors.MonitoringDecreasingValueError(name, None, delta)
+
+    if modify_fn is None:
+      modify_fn = default_modify_fn(name)
+
+    with self._thread_lock:
+      self._entry(name).set_value(fields, target_fields, modify_fn(
+          self.get(name, fields, target_fields, 0), delta))
+
+  def reset_for_unittest(self, name=None):
+    if name is not None:
+      self._reset(name)
+    else:
+      for name in self._values.keys():
+        self._reset(name)
+
+  def _reset(self, name):
+    self._values[name] = _TargetFieldsValues(self._start_time(name))
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/metrics.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/metrics.py
new file mode 100644
index 0000000..00d0c63
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/metrics.py
@@ -0,0 +1,561 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Classes representing individual metrics that can be sent."""
+
+import re
+
+import six
+
+from infra_libs.ts_mon.protos import metrics_pb2
+
+from infra_libs.ts_mon.common import distribution
+from infra_libs.ts_mon.common import errors
+from infra_libs.ts_mon.common import interface
+
+
+MICROSECONDS_PER_SECOND = 1000000
+
+
+class Field(object):
+  FIELD_NAME_PATTERN = re.compile(r'[A-Za-z_][A-Za-z0-9_]*')
+
+  allowed_python_types = None
+  type_enum = None
+  field_name = None
+
+  def __init__(self, name):
+    if not self.FIELD_NAME_PATTERN.match(name):
+      raise errors.MetricDefinitionError(
+          'Invalid metric field name "%s" - must match the regex "%s"' % (
+                name, self.FIELD_NAME_PATTERN.pattern))
+
+    self.name = name
+
+  def __eq__(self, other):
+    return (type(self) == type(other) and
+            self.__dict__ == other.__dict__)
+
+  def validate_value(self, metric_name, value):
+    if not isinstance(value, self.allowed_python_types):
+      raise errors.MonitoringInvalidFieldTypeError(
+          metric_name, self.name, value)
+
+  def populate_proto(self, proto, value):
+    setattr(proto, self.field_name, value)
+
+
+class StringField(Field):
+  allowed_python_types = six.string_types
+  type_enum = metrics_pb2.MetricsDataSet.MetricFieldDescriptor.STRING
+  field_name = 'string_value'
+
+
+class IntegerField(Field):
+  allowed_python_types = six.integer_types
+  type_enum = metrics_pb2.MetricsDataSet.MetricFieldDescriptor.INT64
+  field_name = 'int64_value'
+
+
+class BooleanField(Field):
+  allowed_python_types = bool
+  type_enum = metrics_pb2.MetricsDataSet.MetricFieldDescriptor.BOOL
+  field_name = 'bool_value'
+
+
+class Metric(object):
+  """Abstract base class for a metric.
+
+  A Metric is an attribute that may be monitored across many targets. Examples
+  include disk usage or the number of requests a server has received. A single
+  process may keep track of many metrics.
+
+  Note that Metric objects may be initialized at any time (for example, at the
+  top of a library), but cannot be sent until the underlying Monitor object
+  has been set up (usually by the top-level process parsing the command line).
+
+  A Metric can actually store multiple values that are identified by a set of
+  fields (which are themselves key-value pairs).  Fields can be passed to the
+  set() or increment() methods to modify a particular value, or passed to the
+  constructor in which case they will be used as the defaults for this Metric.
+
+  The unit of measurement for Metric data should be specified with
+  MetricsDataUnits when a Metric object is created:
+  e.g., MetricsDataUnits.SECONDS or MetricsDataUnits.BYTES.
+  See `MetricsDataUnits` class for a full list of units.
+
+  Do not directly instantiate an object of this class.
+  Use the concrete child classes instead:
+  * StringMetric for metrics with string value
+  * BooleanMetric for metrics with boolean values
+  * CounterMetric for metrics with monotonically increasing integer values
+  * GaugeMetric for metrics with arbitrarily varying integer values
+  * CumulativeMetric for metrics with monotonically increasing float values
+  * FloatMetric for metrics with arbitrarily varying float values
+
+  See http://go/inframon-doc for help designing and using your metrics.
+  """
+
+  def __init__(self, name, description, field_spec, units=None):
+    """Create an instance of a Metric.
+
+    Args:
+      name (str): the file-like name of this metric
+      description (string): help string for the metric. Should be enough to
+                            know what the metric is about.
+      field_spec (list): a list of Field subclasses to define the fields that
+                         are allowed on this metric.  Pass a list of either
+                         StringField, IntegerField or BooleanField here.
+      units (string): the unit used to measure data for given metric. Some
+                      common units are pre-defined in the MetricsDataUnits
+                      class.
+    """
+    field_spec = field_spec or []
+
+    self._name = name.lstrip('/')
+
+    if not isinstance(description, six.string_types):
+      raise errors.MetricDefinitionError('Metric description must be a string')
+    if not description:
+      raise errors.MetricDefinitionError('Metric must have a description')
+    if (not isinstance(field_spec, (list, tuple)) or
+        any(not isinstance(x, Field) for x in field_spec)):
+      raise errors.MetricDefinitionError(
+          'Metric constructor takes a list of Fields, or None')
+    if len(field_spec) > 7:
+      raise errors.MonitoringTooManyFieldsError(self._name, field_spec)
+
+    self._start_time = None
+    self._field_spec = field_spec
+    self._sorted_field_names = sorted(x.name for x in field_spec)
+    self._description = description
+    self._units = units
+
+    interface.register(self)
+
+  def __eq__(self, other):
+    return (type(self) == type(other)
+            and self.__dict__ == other.__dict__)
+
+  @property
+  def field_spec(self):
+    return list(self._field_spec)
+
+  @property
+  def name(self):
+    return self._name
+
+  @property
+  def start_time(self):
+    return self._start_time
+
+  @property
+  def units(self):
+    return self._units
+
+  def is_cumulative(self):
+    raise NotImplementedError()
+
+  def unregister(self):
+    interface.unregister(self)
+
+  def populate_data_set(self, data_set):
+    """Populate MetricsDataSet."""
+    data_set.metric_name = '%s%s' % (interface.state.metric_name_prefix,
+                                     self._name)
+    data_set.description = self._description or ''
+    if self._units is not None:
+      data_set.annotations.unit = self._units
+
+    if self.is_cumulative():
+      data_set.stream_kind = metrics_pb2.CUMULATIVE
+    else:
+      data_set.stream_kind = metrics_pb2.GAUGE
+
+    self._populate_value_type(data_set)
+    self._populate_field_descriptors(data_set)
+
+  def populate_data(self, data, start_time, end_time, fields, value):
+    """Populate a new metrics_pb2.MetricsData.
+
+    Args:
+      data (metrics_pb2.MetricsData): protocol buffer into
+        which to populate the current metric values.
+      start_time (int): timestamp in microseconds since UNIX epoch.
+    """
+    data.start_timestamp.seconds = int(start_time)
+    data.end_timestamp.seconds = int(end_time)
+
+    self._populate_fields(data, fields)
+    self._populate_value(data, value)
+
+  def _populate_field_descriptors(self, data_set):
+    """Populate `field_descriptor` in MetricsDataSet.
+
+    Args:
+      data_set (metrics_pb2.MetricsDataSet): a data set protobuf to populate
+    """
+    for spec in self._field_spec:
+      descriptor = data_set.field_descriptor.add()
+      descriptor.name = spec.name
+      descriptor.field_type = spec.type_enum
+
+  def _populate_fields(self, data, field_values):
+    """Fill in the fields attribute of a metric protocol buffer.
+
+    Args:
+      data (metrics_pb2.MetricsData): a metrics protobuf to populate
+      field_values (tuple): field values
+    """
+    for spec, value in zip(self._field_spec, field_values):
+      field = data.field.add()
+      field.name = spec.name
+      spec.populate_proto(field, value)
+
+  def _validate_fields(self, fields):
+    """Checks the correct number and types of field values were provided.
+
+    Args:
+      fields (dict): A dict of field values given by the user, or None.
+
+    Returns:
+      fields' values as a tuple, in the same order as the field_spec.
+
+    Raises:
+      WrongFieldsError: if you provide a different number of fields to those
+        the metric was defined with.
+      MonitoringInvalidFieldTypeError: if the field value was the wrong type for
+        the field spec.
+    """
+    fields = fields or {}
+
+    if not isinstance(fields, dict):
+      raise ValueError('fields should be a dict, got %r (%s)' % (
+          fields, type(fields)))
+
+    if sorted(fields) != self._sorted_field_names:
+      raise errors.WrongFieldsError(
+          self.name, fields.keys(), self._sorted_field_names)
+
+    for spec in self._field_spec:
+      spec.validate_value(self.name, fields[spec.name])
+
+    return tuple(fields[spec.name] for spec in self._field_spec)
+
+  def _populate_value(self, data, value):
+    """Fill in the the data values of a metric protocol buffer.
+
+    Args:
+      data (metrics_pb2.MetricsData): a metrics protobuf to populate
+      value (see concrete class): the value of the metric to be set
+    """
+    raise NotImplementedError()
+
+  def _populate_value_type(self, data_set):
+    """Fill in the the data values of a metric protocol buffer.
+
+    Args:
+      data_set (metrics_pb2.MetricsDataSet): a MetricsDataSet protobuf to
+          populate
+    """
+    raise NotImplementedError()
+
+  def set(self, value, fields=None, target_fields=None):
+    """Set a new value for this metric. Results in sending a new value.
+
+    The subclass should do appropriate type checking on value and then call
+    self._set_and_send_value.
+
+    Args:
+      value (see concrete class): the value of the metric to be set
+      fields (dict): metric field values
+      target_fields (dict): overwrite some of the default target fields
+    """
+    raise NotImplementedError()
+
+  def get(self, fields=None, target_fields=None):
+    """Returns the current value for this metric.
+
+    Subclasses should never use this to get a value, modify it and set it again.
+    Instead use _incr with a modify_fn.
+    """
+    return interface.state.store.get(
+        self.name, self._validate_fields(fields), target_fields)
+
+  def get_all(self):
+    return interface.state.store.iter_field_values(self.name)
+
+  def reset(self):
+    """Clears the values of this metric.  Useful in unit tests.
+
+    It might be easier to call ts_mon.reset_for_unittest() in your setUp()
+    method instead of resetting every individual metric.
+    """
+
+    interface.state.store.reset_for_unittest(self.name)
+
+  def _set(self, fields, target_fields, value, enforce_ge=False):
+    interface.state.store.set(
+        self.name, self._validate_fields(fields), target_fields,
+        value, enforce_ge=enforce_ge)
+
+  def _incr(self, fields, target_fields, delta, modify_fn=None):
+    interface.state.store.incr(
+        self.name, self._validate_fields(fields), target_fields,
+        delta, modify_fn=modify_fn)
+
+
+class StringMetric(Metric):
+  """A metric whose value type is a string."""
+
+  def _populate_value(self, data, value):
+    data.string_value = value
+
+  def _populate_value_type(self, data_set):
+    data_set.value_type = metrics_pb2.STRING
+
+  def set(self, value, fields=None, target_fields=None):
+    if not isinstance(value, six.string_types):
+      raise errors.MonitoringInvalidValueTypeError(self._name, value)
+    self._set(fields, target_fields, value)
+
+  def is_cumulative(self):
+    return False
+
+
+class BooleanMetric(Metric):
+  """A metric whose value type is a boolean."""
+
+  def _populate_value(self, data, value):
+    data.bool_value = value
+
+  def _populate_value_type(self, data_set):
+    data_set.value_type = metrics_pb2.BOOL
+
+  def set(self, value, fields=None, target_fields=None):
+    if not isinstance(value, bool):
+      raise errors.MonitoringInvalidValueTypeError(self._name, value)
+    self._set(fields, target_fields, value)
+
+  def is_cumulative(self):
+    return False
+
+
+class NumericMetric(Metric):  # pylint: disable=abstract-method
+  """Abstract base class for numeric (int or float) metrics."""
+
+  def increment(self, fields=None, target_fields=None):
+    self._incr(fields, target_fields, 1)
+
+  def increment_by(self, step, fields=None, target_fields=None):
+    self._incr(fields, target_fields, step)
+
+
+class CounterMetric(NumericMetric):
+  """A metric whose value type is a monotonically increasing integer."""
+
+  def __init__(self, name, description, field_spec, start_time=None,
+               units=None):
+    self._start_time = start_time
+    super(CounterMetric, self).__init__(
+        name, description, field_spec, units=units)
+
+  def _populate_value(self, data, value):
+    data.int64_value = value
+
+  def _populate_value_type(self, data_set):
+    data_set.value_type = metrics_pb2.INT64
+
+  def set(self, value, fields=None, target_fields=None):
+    if not isinstance(value, six.integer_types):
+      raise errors.MonitoringInvalidValueTypeError(self._name, value)
+    self._set(fields, target_fields, value, enforce_ge=True)
+
+  def increment_by(self, step, fields=None, target_fields=None):
+    if not isinstance(step, six.integer_types):
+      raise errors.MonitoringInvalidValueTypeError(self._name, step)
+    self._incr(fields, target_fields, step)
+
+  def is_cumulative(self):
+    return True
+
+
+class GaugeMetric(NumericMetric):
+  """A metric whose value type is an integer."""
+
+  def _populate_value(self, data, value):
+    data.int64_value = value
+
+  def _populate_value_type(self, data_set):
+    data_set.value_type = metrics_pb2.INT64
+
+  def set(self, value, fields=None, target_fields=None):
+    if not isinstance(value, six.integer_types):
+      raise errors.MonitoringInvalidValueTypeError(self._name, value)
+    self._set(fields, target_fields, value)
+
+  def is_cumulative(self):
+    return False
+
+
+class CumulativeMetric(NumericMetric):
+  """A metric whose value type is a monotonically increasing float."""
+
+  def __init__(self, name, description, field_spec, start_time=None,
+               units=None):
+    self._start_time = start_time
+    super(CumulativeMetric, self).__init__(
+        name, description, field_spec, units=units)
+
+  def _populate_value(self, data, value):
+    data.double_value = value
+
+  def _populate_value_type(self, data_set):
+    data_set.value_type = metrics_pb2.DOUBLE
+
+  def set(self, value, fields=None, target_fields=None):
+    if not isinstance(value, (float, int)):
+      raise errors.MonitoringInvalidValueTypeError(self._name, value)
+    self._set(fields, target_fields, float(value), enforce_ge=True)
+
+  def is_cumulative(self):
+    return True
+
+
+class FloatMetric(NumericMetric):
+  """A metric whose value type is a float."""
+
+  def _populate_value(self, metric, value):
+    metric.double_value = value
+
+  def _populate_value_type(self, data_set_pb):
+    data_set_pb.value_type = metrics_pb2.DOUBLE
+
+  def set(self, value, fields=None, target_fields=None):
+    if not isinstance(value, (float, int)):
+      raise errors.MonitoringInvalidValueTypeError(self._name, value)
+    self._set(fields, target_fields, float(value))
+
+  def is_cumulative(self):
+    return False
+
+
+class _DistributionMetricBase(Metric):
+  """A metric that holds a distribution of values.
+
+  By default buckets are chosen from a geometric progression, each bucket being
+  approximately 1.59 times bigger than the last.  In practice this is suitable
+  for many kinds of data, but you may want to provide a FixedWidthBucketer or
+  GeometricBucketer with different parameters."""
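+
+  # A sketch (assuming FixedWidthBucketer takes a bucket width, as provided by
+  # the distribution module): an explicit linear bucketer instead of the
+  # default geometric one.
+  #
+  #   latency = CumulativeDistributionMetric(
+  #       'myapp/latency', 'Request latency in ms.', [StringField('endpoint')],
+  #       bucketer=distribution.FixedWidthBucketer(width=50))
+  #   latency.add(123, fields={'endpoint': '/list'})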
+
+  def __init__(self, name, description, field_spec, is_cumulative=True,
+               bucketer=None, start_time=None, units=None):
+    self._start_time = start_time
+
+    if bucketer is None:
+      bucketer = distribution.GeometricBucketer()
+
+    self._is_cumulative = is_cumulative
+    self.bucketer = bucketer
+    super(_DistributionMetricBase, self).__init__(
+        name, description, field_spec, units=units)
+
+  def _populate_value(self, metric, value):
+    pb = metric.distribution_value
+
+    # Copy the bucketer params.
+    if value.bucketer.width == 0:
+      pb.exponential_buckets.growth_factor = value.bucketer.growth_factor
+      pb.exponential_buckets.scale = value.bucketer.scale
+      pb.exponential_buckets.num_finite_buckets = (
+          value.bucketer.num_finite_buckets)
+    else:
+      pb.linear_buckets.width = value.bucketer.width
+      pb.linear_buckets.offset = 0.0
+      pb.linear_buckets.num_finite_buckets = value.bucketer.num_finite_buckets
+
+    # Copy the distribution bucket values.  Include the overflow buckets on
+    # either end.
+    pb.bucket_count.extend(
+        value.buckets.get(i, 0) for i in
+        range(0, value.bucketer.total_buckets))
+
+    pb.count = value.count
+    pb.mean = float(value.sum) / max(value.count, 1)
+
+  def _populate_value_type(self, data_set_pb):
+    data_set_pb.value_type = metrics_pb2.DISTRIBUTION
+
+  def add(self, value, fields=None, target_fields=None):
+    def modify_fn(dist, value):
+      if dist == 0:
+        dist = distribution.Distribution(self.bucketer)
+      dist.add(value)
+      return dist
+
+    self._incr(fields, target_fields, value, modify_fn=modify_fn)
+
+  def set(self, value, fields=None, target_fields=None):
+    """Replaces the distribution with the given fields with another one.
+
+    This only makes sense on non-cumulative DistributionMetrics.
+
+    Args:
+      value: A infra_libs.ts_mon.Distribution.
+    """
+
+    if self._is_cumulative:
+      raise TypeError(
+          'Cannot set() a cumulative DistributionMetric (use add() instead)')
+
+    if not isinstance(value, distribution.Distribution):
+      raise errors.MonitoringInvalidValueTypeError(self._name, value)
+
+    self._set(fields, target_fields, value)
+
+  def is_cumulative(self):
+    return self._is_cumulative
+
+
+class CumulativeDistributionMetric(_DistributionMetricBase):
+  """A DistributionMetric with is_cumulative set to True."""
+
+  def __init__(self, name, description, field_spec, bucketer=None, units=None):
+    super(CumulativeDistributionMetric, self).__init__(
+        name, description, field_spec,
+        is_cumulative=True,
+        bucketer=bucketer,
+        units=units)
+
+
+class NonCumulativeDistributionMetric(_DistributionMetricBase):
+  """A DistributionMetric with is_cumulative set to False."""
+
+  def __init__(self, name, description, field_spec, bucketer=None, units=None):
+    super(NonCumulativeDistributionMetric, self).__init__(
+        name, description, field_spec,
+        is_cumulative=False,
+        bucketer=bucketer,
+        units=units)
+
+
+class MetricsDataUnits(object):
+  """An container for units of measurement for Metrics data."""
+
+  UNKNOWN_UNITS = '{unknown}'
+  SECONDS = 's'
+  MILLISECONDS = 'ms'
+  MICROSECONDS = 'us'
+  NANOSECONDS = 'ns'
+  BITS = 'B'
+  BYTES = 'By'
+  KILOBYTES = 'kBy'
+  MEGABYTES = 'MBy'
+  GIGABYTES = 'GBy'
+  KIBIBYTES = 'kiBy'
+  MEBIBYTES = 'MiBy'
+  GIBIBYTES = 'GiBy'
+  AMPS = 'A'
+  MILLIAMPS = 'mA'
+  DEGREES_CELSIUS = 'Cel'
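+
+
+# Example (a sketch): units are passed when a metric is defined, e.g.
+#   cpu = CumulativeMetric('proc/cpu/usage', 'CPU seconds used.', None,
+#                          units=MetricsDataUnits.SECONDS)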
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/monitors.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/monitors.py
new file mode 100644
index 0000000..38a483e
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/monitors.py
@@ -0,0 +1,162 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Classes representing the monitoring interface for tasks or devices."""
+
+import json
+import logging
+import socket
+
+import httplib2
+
+from googleapiclient import errors
+from infra_libs import httplib2_utils
+from infra_libs.ts_mon.common import pb_to_popo
+try: # pragma: no cover
+  from oauth2client import gce
+except ImportError: # pragma: no cover
+  from oauth2client.contrib import gce
+from oauth2client.client import GoogleCredentials
+from oauth2client.file import Storage
+
+# Special string that can be passed through as the credentials path to use the
+# default Appengine or GCE service account.
+APPENGINE_CREDENTIALS = ':appengine'
+GCE_CREDENTIALS = ':gce'
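+# For example, CredentialFactory.from_string(':gce') yields GCECredentials,
+# ':appengine' yields AppengineCredentials, and any other string is treated as
+# a path to a credentials file (see CredentialFactory.from_string below).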
+
+
+class CredentialFactory(object):
+  """Base class for things that can create OAuth2Credentials."""
+
+  @classmethod
+  def from_string(cls, path):
+    """Creates an appropriate subclass from a file path or magic string."""
+
+    if path == APPENGINE_CREDENTIALS:
+      return AppengineCredentials()
+    if path == GCE_CREDENTIALS:
+      return GCECredentials()
+    return FileCredentials(path)
+
+  def create(self, scopes):
+    raise NotImplementedError
+
+
+class GCECredentials(CredentialFactory):
+  def create(self, scopes):
+    return gce.AppAssertionCredentials(scopes)
+
+
+class AppengineCredentials(CredentialFactory):
+  def create(self, scopes):  # pragma: no cover
+    # This import doesn't work outside appengine, so delay it until it's used.
+    from oauth2client import appengine
+    return appengine.AppAssertionCredentials(scopes)
+
+
+class FileCredentials(CredentialFactory):
+  def __init__(self, path):
+    self.path = path
+
+  def create(self, scopes):
+    with open(self.path, 'r') as fh:
+      data = json.load(fh)
+    if data.get('type', None):
+      credentials = GoogleCredentials.from_stream(self.path)
+      credentials = credentials.create_scoped(scopes)
+      return credentials
+    return Storage(self.path).get()
+
+
+class DelegateServiceAccountCredentials(CredentialFactory):
+  IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
+
+  def __init__(self, service_account_email, base):
+    self.base = base
+    self.service_account_email = service_account_email
+
+  def create(self, scopes):
+    logging.info('Delegating to service account %s', self.service_account_email)
+    http = httplib2_utils.InstrumentedHttp('actor-credentials')
+    http = self.base.create([self.IAM_SCOPE]).authorize(http)
+    return httplib2_utils.DelegateServiceAccountCredentials(
+        http, self.service_account_email, scopes)
+
+
+class Monitor(object):
+  """Abstract base class encapsulating the ability to collect and send metrics.
+
+  This is a singleton class. There should only be one instance of a Monitor at
+  a time. It will be created and initialized by process_argparse_options. It
+  must exist in order for any metrics to be sent, although both Targets and
+  Metrics may be initialized before the underlying Monitor. If it does not exist
+  at the time that a Metric is sent, an exception will be raised.
+
+  send() can be either synchronous or asynchronous.  If synchronous, it needs to
+  make the HTTP request, wait for a response and return None.
+  If asynchronous, send() should start the request and immediately return some
+  object which is later passed to wait() once all requests have been started.
+  """
+
+  _SCOPES = []
+
+  def send(self, metric_pb):
+    raise NotImplementedError()
+
+  def wait(self, state):  # pragma: no cover
+    pass
+
+
+class HttpsMonitor(Monitor):
+
+  _SCOPES = ['https://www.googleapis.com/auth/prodxmon']
+
+  def __init__(self, endpoint, credential_factory, http=None, ca_certs=None):
+    self._endpoint = endpoint
+    credentials = credential_factory.create(self._SCOPES)
+    if http is None:
+      http = httplib2_utils.RetriableHttp(
+          httplib2_utils.InstrumentedHttp('acq-mon-api', ca_certs=ca_certs))
+    self._http = credentials.authorize(http)
+
+  def encode_to_json(self, metric_pb):
+    return json.dumps({'payload': pb_to_popo.convert(metric_pb)})
+
+  def send(self, metric_pb):
+    body = self.encode_to_json(metric_pb)
+
+    try:
+      resp, content = self._http.request(self._endpoint,
+          method='POST',
+          body=body,
+          headers={'Content-Type': 'application/json'})
+      if resp.status != 200:
+        logging.warning('HttpsMonitor.send received status %d: %s', resp.status,
+                        content)
+    except (ValueError, errors.Error,
+            socket.timeout, socket.error, socket.herror, socket.gaierror,
+            httplib2.HttpLib2Error):
+      logging.exception('HttpsMonitor.send failed')
+
+
+class DebugMonitor(Monitor):
+  """Class which writes metrics to logs or a local file for debugging."""
+  def __init__(self, filepath=None):
+    if filepath is None:
+      self._fh = None
+    else:
+      self._fh = open(filepath, 'a')
+
+  def send(self, metric_pb):
+    text = str(metric_pb)
+    logging.info('Flushing monitoring metrics:\n%s', text)
+    if self._fh is not None:
+      self._fh.write(text + '\n\n')
+      self._fh.flush()
+
+
+class NullMonitor(Monitor):
+  """Class that doesn't send metrics anywhere."""
+  def send(self, metric_pb):
+    pass
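+
+
+# A debugging sketch (illustrative): config.process_argparse_options() builds
+# one of these monitors from the configured endpoint; a file:// endpoint
+# becomes a DebugMonitor that appends metrics to a local file, e.g.
+#
+#   interface.state.global_monitor = DebugMonitor('/tmp/ts_mon_debug.log')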
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/pb_to_popo.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/pb_to_popo.py
new file mode 100644
index 0000000..abec76e
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/pb_to_popo.py
@@ -0,0 +1,53 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import logging
+import sys
+
+from google.protobuf.descriptor import FieldDescriptor as fd
+import six
+
+
+def convert(pb):
+  """Convert protobuf to plain-old-python-object"""
+  obj = {}
+  for field, value in pb.ListFields():
+    if field.label == fd.LABEL_REPEATED:
+      obj[field.name] = list(_get_json_func(field.type)(v) for v in value)
+    else:
+      obj[field.name] = _get_json_func(field.type)(value)
+  return obj
+
+
+def _get_json_func(field_type):
+  if field_type in _FD_TO_JSON:
+    return _FD_TO_JSON[field_type]
+  else: # pragma: no cover
+    logging.warning("pb_to_popo doesn't support converting %s", field_type)
+    return six.text_type
+
+
+if sys.version_info.major < 3:
+  _64bit_type = long
+else:
+  _64bit_type = int
+
+_FD_TO_JSON = {
+  fd.TYPE_BOOL: bool,
+  fd.TYPE_DOUBLE: float,
+  fd.TYPE_ENUM: int,
+  fd.TYPE_FIXED32: float,
+  fd.TYPE_FIXED64: float,
+  fd.TYPE_FLOAT: float,
+  fd.TYPE_INT32: int,
+  fd.TYPE_INT64: _64bit_type,
+  fd.TYPE_SFIXED32: float,
+  fd.TYPE_SFIXED64: float,
+  fd.TYPE_SINT32: int,
+  fd.TYPE_SINT64: _64bit_type,
+  fd.TYPE_STRING: six.text_type,
+  fd.TYPE_UINT32: int,
+  fd.TYPE_UINT64: _64bit_type,
+  fd.TYPE_MESSAGE: convert
+}
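+
+
+# Usage sketch: HttpsMonitor.encode_to_json() wraps this as
+#   json.dumps({'payload': convert(metrics_payload_pb)})
+# so repeated fields become lists and nested messages become nested dicts.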
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/standard_metrics.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/standard_metrics.py
new file mode 100644
index 0000000..f237023
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/standard_metrics.py
@@ -0,0 +1,19 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Metrics common to all tasks and devices."""
+
+from infra_libs.ts_mon.common import metrics
+
+
+up = metrics.BooleanMetric(
+    'presence/up',
+    'Set to True when the program is running, missing otherwise.',
+    None)
+
+
+def init():
+  # TODO(dsansome): Add more metrics for git revision, cipd package version,
+  # uptime, etc.
+  up.set(True)
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/targets.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/targets.py
new file mode 100644
index 0000000..be78aa5
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/common/targets.py
@@ -0,0 +1,125 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Classes representing the monitoring interface for tasks or devices."""
+
+
+class Target(object):
+  """Abstract base class for a monitoring target.
+
+  A Target is a "thing" that should be monitored, for example, a device or a
+  process. The majority of the time, a single process will have only a single
+  Target.
+
+  Do not directly instantiate an object of this class.
+  Use the concrete child classes instead:
+  * TaskTarget to monitor a job or tasks running in (potentially) many places;
+  * DeviceTarget to monitor a host machine that may be running a task.
+  """
+
+  def __init__(self):
+    # Subclasses should list the updatable target fields here.
+    self._fields = tuple()
+
+  def populate_target_pb(self, collection_pb):
+    """Populate the 'target' into a MetricsCollection."""
+    raise NotImplementedError()
+
+  def to_dict(self):
+    """Return target field values as a dictionary."""
+    return {field: getattr(self, field) for field in self._fields}
+
+  def update(self, target_fields):
+    """Update values of some target fields given as a dict."""
+    for field, value in target_fields.items():
+      if field not in self._fields:
+        raise AttributeError('Bad target field: %s' % field)
+      # Make sure the attribute actually exists in the object.
+      getattr(self, field)
+      setattr(self, field, value)
+
+  def __eq__(self, other):
+    if type(self) != type(other):
+      return False
+
+    for field in self._fields:
+      if getattr(self, field) != getattr(other, field):
+        return False
+
+    return True
+
+  def __hash__(self):
+    return hash(tuple(sorted(self.to_dict())))
+
+class DeviceTarget(Target):
+  """Monitoring interface class for monitoring specific hosts or devices."""
+
+  def __init__(self, region, role, network, hostname):
+    """Create a Target object exporting info about a specific device.
+
+    Args:
+      region (str): physical region in which the device is located.
+      role (str): role of the device.
+      network (str): virtual network on which the device is located.
+      hostname (str): name by which the device self-identifies.
+    """
+    super(DeviceTarget, self).__init__()
+    self.region = region
+    self.role = role
+    self.network = network
+    self.hostname = hostname
+    self.realm = 'ACQ_CHROME'
+    self.alertable = True
+    self._fields = ('region', 'role', 'network', 'hostname')
+
+  def populate_target_pb(self, collection):
+    """Populate the 'network_device' target into metrics_pb2.MetricsCollection.
+
+    Args:
+      collection (metrics_pb2.MetricsCollection): the collection proto to be
+          populated.
+    """
+    collection.network_device.metro = self.region
+    collection.network_device.role = self.role
+    collection.network_device.hostgroup = self.network
+    collection.network_device.hostname = self.hostname
+    collection.network_device.realm = self.realm
+    collection.network_device.alertable = self.alertable
+
+
+class TaskTarget(Target):
+  """Monitoring interface class for monitoring active jobs or processes."""
+
+  def __init__(self, service_name, job_name, region, hostname, task_num=0):
+    """Create a Target object exporting info about a specific task.
+
+    Args:
+      service_name (str): service of which this task is a part.
+      job_name (str): specific name of this task.
+      region (str): general region in which this task is running.
+      hostname (str): specific machine on which this task is running.
+      task_num (int): replication id of this task.
+    """
+    super(TaskTarget, self).__init__()
+    self.service_name = service_name
+    self.job_name = job_name
+    self.region = region
+    self.hostname = hostname
+    self.task_num = task_num
+    self._fields = ('service_name', 'job_name', 'region',
+                    'hostname', 'task_num')
+
+  def populate_target_pb(self, collection):
+    """Populate the 'task' target into metrics_pb2.MetricsCollection.
+
+    Args:
+      collection (metrics_pb2.MetricsCollection): the collection proto to be
+          populated.
+    """
+    collection.task.service_name = self.service_name
+    collection.task.job_name = self.job_name
+    collection.task.data_center = self.region
+    collection.task.host_name = self.hostname
+    collection.task.task_num = self.task_num
+
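+# A wiring sketch (values illustrative): this mirrors how
+# config.process_argparse_options() builds the default target when
+# --ts-mon-target-type=task is used.
+#
+#   interface.state.target = TaskTarget(
+#       'autotest', 'drone', 'us-mtv', 'chromeos-server1', task_num=0)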
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/config.proto b/utils/frozen_chromite/third_party/infra_libs/ts_mon/config.proto
new file mode 100644
index 0000000..e7c1338
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/config.proto
@@ -0,0 +1,13 @@
+syntax = "proto3";
+
+// ts_mon's config file in /etc/chrome-infra/ts-mon.json is a JSON-encoded
+// ConfigFile message.
+// Note: this .proto file isn't currently used to encode/decode the config file,
+// it's just here as a reference.
+message ConfigFile {
+  // Url to post monitoring metrics to.  file:// URLs are supported as well.
+  string endpoint = 1;
+
+  // Path to a pkcs8 json credential file.
+  string credentials = 2;
+}
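+
+// Example ts-mon.json contents (illustrative values):
+//
+//   {
+//     "endpoint": "https://example.com/prodx-mon",
+//     "credentials": "/etc/chrome-infra/ts-mon-credentials.json"
+//   }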
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/config.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/config.py
new file mode 100644
index 0000000..89fa8fe
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/config.py
@@ -0,0 +1,251 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import os
+import socket
+import sys
+import re
+
+import requests
+
+from infra_libs.ts_mon.common import interface
+from infra_libs.ts_mon.common import monitors
+from infra_libs.ts_mon.common import standard_metrics
+from infra_libs.ts_mon.common import targets
+
+
+def load_machine_config(filename):
+  if not os.path.exists(filename):
+    logging.info('Configuration file does not exist, ignoring: %s', filename)
+    return {}
+
+  try:
+    with open(filename) as fh:
+      return json.load(fh)
+  except Exception:
+    logging.error('Configuration file couldn\'t be read: %s', filename)
+    raise
+
+
+def _default_region(fqdn):
+  # Check if we're running in a GCE instance.
+  try:
+    r = requests.get(
+        'http://metadata.google.internal/computeMetadata/v1/instance/zone',
+        headers={'Metadata-Flavor': 'Google'},
+        timeout=1.0)
+  except requests.exceptions.RequestException:
+    pass
+  else:
+    if r.status_code == requests.codes.ok:
+      # The zone is the last slash-separated component.
+      return r.text.split('/')[-1]
+
+  try:
+    return fqdn.split('.')[1]  # [chrome|golo]
+  except IndexError:
+    return ''
+
+
+def _default_network(host):
+  try:
+    # Regular expression that matches the vast majority of our host names.
+    # Matches everything of the form 'masterN', 'masterNa', and 'foo-xN'.
+    return re.match(r'^([\w-]*?-[acm]|master)(\d+)a?$', host).group(2)  # N
+  except AttributeError:
+    return ''
+
+
+def add_argparse_options(parser):
+  """Add monitoring related flags to a process' argument parser.
+
+  Args:
+    parser (argparse.ArgumentParser): the parser for the main process.
+  """
+  if sys.platform == 'win32':  # pragma: no cover
+    default_config_file = 'C:\\chrome-infra\\ts-mon.json'
+  else:  # pragma: no cover
+    default_config_file = '/etc/chrome-infra/ts-mon.json'
+
+  parser = parser.add_argument_group('Timeseries Monitoring Options')
+  parser.add_argument(
+      '--ts-mon-config-file',
+      default=default_config_file,
+      help='path to a JSON config file that contains suitable values for '
+           '"endpoint" and "credentials" for this machine. This config file is '
+           'intended to be shared by all processes on the machine, as the '
+           'values depend on the machine\'s position in the network, IP '
+           'whitelisting and deployment of credentials. (default: %(default)s)')
+  parser.add_argument(
+      '--ts-mon-endpoint',
+      help='url (file:// or https://) to post monitoring metrics to. If set, '
+           'overrides the value in --ts-mon-config-file')
+  parser.add_argument(
+      '--ts-mon-credentials',
+      help='path to a pkcs8 json credential file. If set, overrides the value '
+           'in --ts-mon-config-file')
+  parser.add_argument(
+      '--ts-mon-ca-certs',
+      help='path to file containing root CA certificates for SSL server '
+           'certificate validation. If not set, a CA cert file bundled with '
+           'httplib2 is used.')
+  parser.add_argument(
+      '--ts-mon-flush',
+      choices=('manual', 'auto'), default='auto',
+      help=('metric push behavior: manual (only send when flush() is called), '
+            'or auto (send automatically every --ts-mon-flush-interval-secs '
+            'seconds). (default: %(default)s)'))
+  parser.add_argument(
+      '--ts-mon-flush-interval-secs',
+      type=int,
+      default=60,
+      help=('automatically push metrics on this interval if '
+            '--ts-mon-flush=auto.'))
+  parser.add_argument(
+      '--ts-mon-autogen-hostname',
+      action="store_true",
+      help=('Indicate that the hostname is autogenerated. '
+            'This option must be set on autoscaled GCE VMs, Kubernetes pods, '
+            'or any other hosts with dynamically generated names.'))
+
+  parser.add_argument(
+      '--ts-mon-target-type',
+      choices=('device', 'task'),
+      default='device',
+      help='the type of target that is being monitored ("device" or "task").'
+           ' (default: %(default)s)')
+
+  fqdn = socket.getfqdn().lower()  # foo-[a|m]N.[chrome|golo].chromium.org
+  host = fqdn.split('.')[0]  # foo-[a|m]N
+  region = _default_region(fqdn)
+  network = _default_network(host)
+
+  parser.add_argument(
+      '--ts-mon-device-hostname',
+      default=host,
+      help='name of this device. (default: %(default)s)')
+  parser.add_argument(
+      '--ts-mon-device-region',
+      default=region,
+      help='name of the region this device lives in. (default: %(default)s)')
+  parser.add_argument(
+      '--ts-mon-device-role',
+      default='default',
+      help='Role of the device. (default: %(default)s)')
+  parser.add_argument(
+      '--ts-mon-device-network',
+      default=network,
+      help='name of the network this device is connected to. '
+           '(default: %(default)s)')
+
+  parser.add_argument(
+      '--ts-mon-task-service-name',
+      help='name of the service being monitored')
+  parser.add_argument(
+      '--ts-mon-task-job-name',
+      help='name of this job instance of the task')
+  parser.add_argument(
+      '--ts-mon-task-region',
+      default=region,
+      help='name of the region in which this task is running '
+           '(default: %(default)s)')
+  parser.add_argument(
+      '--ts-mon-task-hostname',
+      default=host,
+      help='name of the host on which this task is running '
+           '(default: %(default)s)')
+  parser.add_argument(
+      '--ts-mon-task-number', type=int, default=0,
+      help='number (e.g. for replication) of this instance of this task '
+           '(default: %(default)s)')
+
+  parser.add_argument(
+      '--ts-mon-metric-name-prefix',
+      default='/chrome/infra/',
+      help='metric name prefix for all metrics (default: %(default)s)')
+
+  parser.add_argument(
+      '--ts-mon-use-new-proto',
+      default=True, action='store_true',
+      help='deprecated and ignored')
+
+
+def process_argparse_options(args):
+  """Process command line arguments to initialize the global monitor.
+
+  Also initializes the default target.
+
+  Starts a background thread to automatically flush monitoring metrics if not
+  disabled by command line arguments.
+
+  Args:
+    args (argparse.Namespace): the result of parsing the command line arguments
+  """
+  # Parse the config file if it exists.
+  config = load_machine_config(args.ts_mon_config_file)
+  endpoint = config.get('endpoint', '')
+  credentials = config.get('credentials', '')
+  autogen_hostname = config.get('autogen_hostname', False)
+
+  # Command-line args override the values in the config file.
+  if args.ts_mon_endpoint is not None:
+    endpoint = args.ts_mon_endpoint
+  if args.ts_mon_credentials is not None:
+    credentials = args.ts_mon_credentials
+
+  if args.ts_mon_target_type == 'device':
+    hostname = args.ts_mon_device_hostname
+    if args.ts_mon_autogen_hostname or autogen_hostname:
+      hostname = 'autogen:' + hostname
+    interface.state.target = targets.DeviceTarget(
+        args.ts_mon_device_region,
+        args.ts_mon_device_role,
+        args.ts_mon_device_network,
+        hostname)
+  if args.ts_mon_target_type == 'task':
+    # Reimplement ArgumentParser.error, since we don't have access to the parser
+    if not args.ts_mon_task_service_name:
+      print >> sys.stderr, ('Argument --ts-mon-task-service-name must be '
+                            'provided when the target type is "task".')
+      sys.exit(2)
+    if not args.ts_mon_task_job_name:
+      print >> sys.stderr, ('Argument --ts-mon-task-job-name must be provided '
+                            'when the target type is "task".')
+      sys.exit(2)
+    hostname = args.ts_mon_task_hostname
+    if args.ts_mon_autogen_hostname or autogen_hostname:
+      hostname = 'autogen:' + hostname
+    interface.state.target = targets.TaskTarget(
+        args.ts_mon_task_service_name,
+        args.ts_mon_task_job_name,
+        args.ts_mon_task_region,
+        hostname,
+        args.ts_mon_task_number)
+
+  interface.state.metric_name_prefix = args.ts_mon_metric_name_prefix
+  interface.state.global_monitor = monitors.NullMonitor()
+
+  if endpoint.startswith('file://'):
+    interface.state.global_monitor = monitors.DebugMonitor(
+        endpoint[len('file://'):])
+  elif endpoint.startswith('https://'):
+    interface.state.global_monitor = monitors.HttpsMonitor(
+        endpoint, monitors.CredentialFactory.from_string(credentials),
+        ca_certs=args.ts_mon_ca_certs)
+  elif endpoint.lower() == 'none' or not endpoint:
+    logging.info('ts_mon monitoring has been explicitly disabled')
+  else:
+    logging.error('ts_mon monitoring is disabled because the endpoint provided'
+                  ' is invalid or not supported: %s', endpoint)
+
+  interface.state.flush_mode = args.ts_mon_flush
+
+  if args.ts_mon_flush == 'auto':
+    interface.state.flush_thread = interface._FlushThread(
+        args.ts_mon_flush_interval_secs)
+    interface.state.flush_thread.start()
+
+  standard_metrics.init()
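
(Illustrative note, not applied by this change: a minimal sketch of how a tool could consume the flags and initialization above. It assumes the frozen module keeps the upstream infra_libs entry point add_argparse_options() alongside process_argparse_options(), and that it is importable from the path this patch creates; adjust the import if the layout differs.)

import argparse

from autotest_lib.utils.frozen_chromite.third_party.infra_libs.ts_mon import config as ts_mon_config


def main():
  parser = argparse.ArgumentParser(description='example ts_mon consumer')
  # Assumed helper defined earlier in this file; registers the --ts-mon-* flags.
  ts_mon_config.add_argparse_options(parser)
  # A file:// endpoint writes metrics to a local log instead of pushing them.
  args = parser.parse_args(['--ts-mon-endpoint', 'file:///tmp/ts_mon_debug.log'])
  # Builds the target, picks the monitor for the endpoint, and starts the
  # auto-flush thread when --ts-mon-flush=auto (the default).
  ts_mon_config.process_argparse_options(args)


if __name__ == '__main__':
  main()
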
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/README.md b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/README.md
new file mode 100644
index 0000000..c4ff163
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/README.md
@@ -0,0 +1,9 @@
+Updating the *.proto files: see go/updating-tsmon-protos
+
+To generate the `*_pb2.py` files from the `*.proto` files:
+
+    cd infra_libs/ts_mon/protos/new
+    protoc --python_out=. *.proto
+
+protoc version tested: libprotoc 3.0.0
+
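
(Illustrative note, not applied by this change: a quick sanity check after regenerating, assuming the generated modules live where this patch places them.)

from autotest_lib.utils.frozen_chromite.third_party.infra_libs.ts_mon.protos import metrics_pb2

# The file descriptor name should match the source .proto that was compiled.
assert metrics_pb2.DESCRIPTOR.name == 'metrics.proto'
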
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/__init__.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/__init__.py
new file mode 100644
index 0000000..1aaf0e1
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_network_device.proto b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_network_device.proto
new file mode 100644
index 0000000..73fc276
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_network_device.proto
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+syntax = "proto2";
+
+package ts_mon.proto;
+
+message NetworkDevice {
+  enum TypeId { MESSAGE_TYPE_ID = 34049749; };
+  optional string proxy_environment = 5;
+  optional string acquisition_name = 10;
+  optional string pop = 30;
+  optional bool alertable = 101;
+  optional string realm = 102;
+  optional int64 asn = 103;
+  optional string metro = 104;
+  optional string role = 105;
+  optional string hostname = 106;
+  optional string vendor = 70;
+  optional string hostgroup = 108;
+  optional string proxy_zone = 100;
+}
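
(Illustrative note, not applied by this change: a sketch of filling in the NetworkDevice target message with the generated class added in the next file; the import path assumes this patch's layout and the field values are placeholders.)

from autotest_lib.utils.frozen_chromite.third_party.infra_libs.ts_mon.protos import acquisition_network_device_pb2

device = acquisition_network_device_pb2.NetworkDevice()
device.hostname = 'foo-m1'           # placeholder values for illustration
device.role = 'default'
device.metro = 'example-region'
device.hostgroup = 'example-network'
serialized = device.SerializeToString()  # bytes carried inside a MetricsCollection
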
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_network_device_pb2.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_network_device_pb2.py
new file mode 100644
index 0000000..b571a8a
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_network_device_pb2.py
@@ -0,0 +1,164 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: acquisition_network_device.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='acquisition_network_device.proto',
+  package='ts_mon.proto',
+  serialized_pb=_b('\n acquisition_network_device.proto\x12\x0cts_mon.proto\"\x88\x02\n\rNetworkDevice\x12\x19\n\x11proxy_environment\x18\x05 \x01(\t\x12\x18\n\x10\x61\x63quisition_name\x18\n \x01(\t\x12\x0b\n\x03pop\x18\x1e \x01(\t\x12\x11\n\talertable\x18\x65 \x01(\x08\x12\r\n\x05realm\x18\x66 \x01(\t\x12\x0b\n\x03\x61sn\x18g \x01(\x03\x12\r\n\x05metro\x18h \x01(\t\x12\x0c\n\x04role\x18i \x01(\t\x12\x10\n\x08hostname\x18j \x01(\t\x12\x0e\n\x06vendor\x18\x46 \x01(\t\x12\x11\n\thostgroup\x18l \x01(\t\x12\x12\n\nproxy_zone\x18\x64 \x01(\t\" \n\x06TypeId\x12\x16\n\x0fMESSAGE_TYPE_ID\x10\xd5\x9d\x9e\x10')
+)
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+_NETWORKDEVICE_TYPEID = _descriptor.EnumDescriptor(
+  name='TypeId',
+  full_name='ts_mon.proto.NetworkDevice.TypeId',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='MESSAGE_TYPE_ID', index=0, number=34049749,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=283,
+  serialized_end=315,
+)
+_sym_db.RegisterEnumDescriptor(_NETWORKDEVICE_TYPEID)
+
+
+_NETWORKDEVICE = _descriptor.Descriptor(
+  name='NetworkDevice',
+  full_name='ts_mon.proto.NetworkDevice',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='proxy_environment', full_name='ts_mon.proto.NetworkDevice.proxy_environment', index=0,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='acquisition_name', full_name='ts_mon.proto.NetworkDevice.acquisition_name', index=1,
+      number=10, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='pop', full_name='ts_mon.proto.NetworkDevice.pop', index=2,
+      number=30, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='alertable', full_name='ts_mon.proto.NetworkDevice.alertable', index=3,
+      number=101, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='realm', full_name='ts_mon.proto.NetworkDevice.realm', index=4,
+      number=102, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='asn', full_name='ts_mon.proto.NetworkDevice.asn', index=5,
+      number=103, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='metro', full_name='ts_mon.proto.NetworkDevice.metro', index=6,
+      number=104, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='role', full_name='ts_mon.proto.NetworkDevice.role', index=7,
+      number=105, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hostname', full_name='ts_mon.proto.NetworkDevice.hostname', index=8,
+      number=106, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='vendor', full_name='ts_mon.proto.NetworkDevice.vendor', index=9,
+      number=70, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='hostgroup', full_name='ts_mon.proto.NetworkDevice.hostgroup', index=10,
+      number=108, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='proxy_zone', full_name='ts_mon.proto.NetworkDevice.proxy_zone', index=11,
+      number=100, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _NETWORKDEVICE_TYPEID,
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=51,
+  serialized_end=315,
+)
+
+_NETWORKDEVICE_TYPEID.containing_type = _NETWORKDEVICE
+DESCRIPTOR.message_types_by_name['NetworkDevice'] = _NETWORKDEVICE
+
+NetworkDevice = _reflection.GeneratedProtocolMessageType('NetworkDevice', (_message.Message,), dict(
+  DESCRIPTOR = _NETWORKDEVICE,
+  __module__ = 'acquisition_network_device_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.NetworkDevice)
+  ))
+_sym_db.RegisterMessage(NetworkDevice)
+
+
+# @@protoc_insertion_point(module_scope)
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_task.proto b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_task.proto
new file mode 100644
index 0000000..2306500
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_task.proto
@@ -0,0 +1,18 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+syntax = "proto2";
+
+package ts_mon.proto;
+
+message Task {
+  enum TypeId { MESSAGE_TYPE_ID = 34049749; };
+  optional string proxy_environment = 5;
+  optional string acquisition_name = 10;
+  optional string service_name = 20;
+  optional string job_name = 30;
+  optional string data_center = 40;
+  optional string host_name = 50;
+  optional int32 task_num = 60;
+  optional string proxy_zone = 70;
+}
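
(Illustrative note, not applied by this change: Task is the second target schema a MetricsCollection can carry, parallel to NetworkDevice above. A sketch with the generated class, under the same path assumption; values are placeholders.)

from autotest_lib.utils.frozen_chromite.third_party.infra_libs.ts_mon.protos import acquisition_task_pb2

task = acquisition_task_pb2.Task()
task.service_name = 'autotest'     # placeholder values for illustration
task.job_name = 'example-job'
task.data_center = 'example-region'
task.host_name = 'foo-m1'
task.task_num = 0
serialized = task.SerializeToString()
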
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_task_pb2.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_task_pb2.py
new file mode 100644
index 0000000..9097fe9
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/acquisition_task_pb2.py
@@ -0,0 +1,136 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: acquisition_task.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='acquisition_task.proto',
+  package='ts_mon.proto',
+  serialized_pb=_b('\n\x16\x61\x63quisition_task.proto\x12\x0cts_mon.proto\"\xd3\x01\n\x04Task\x12\x19\n\x11proxy_environment\x18\x05 \x01(\t\x12\x18\n\x10\x61\x63quisition_name\x18\n \x01(\t\x12\x14\n\x0cservice_name\x18\x14 \x01(\t\x12\x10\n\x08job_name\x18\x1e \x01(\t\x12\x13\n\x0b\x64\x61ta_center\x18( \x01(\t\x12\x11\n\thost_name\x18\x32 \x01(\t\x12\x10\n\x08task_num\x18< \x01(\x05\x12\x12\n\nproxy_zone\x18\x46 \x01(\t\" \n\x06TypeId\x12\x16\n\x0fMESSAGE_TYPE_ID\x10\xd5\x9d\x9e\x10')
+)
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+_TASK_TYPEID = _descriptor.EnumDescriptor(
+  name='TypeId',
+  full_name='ts_mon.proto.Task.TypeId',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='MESSAGE_TYPE_ID', index=0, number=34049749,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=220,
+  serialized_end=252,
+)
+_sym_db.RegisterEnumDescriptor(_TASK_TYPEID)
+
+
+_TASK = _descriptor.Descriptor(
+  name='Task',
+  full_name='ts_mon.proto.Task',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='proxy_environment', full_name='ts_mon.proto.Task.proxy_environment', index=0,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='acquisition_name', full_name='ts_mon.proto.Task.acquisition_name', index=1,
+      number=10, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='service_name', full_name='ts_mon.proto.Task.service_name', index=2,
+      number=20, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='job_name', full_name='ts_mon.proto.Task.job_name', index=3,
+      number=30, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data_center', full_name='ts_mon.proto.Task.data_center', index=4,
+      number=40, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='host_name', full_name='ts_mon.proto.Task.host_name', index=5,
+      number=50, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='task_num', full_name='ts_mon.proto.Task.task_num', index=6,
+      number=60, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='proxy_zone', full_name='ts_mon.proto.Task.proxy_zone', index=7,
+      number=70, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _TASK_TYPEID,
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=41,
+  serialized_end=252,
+)
+
+_TASK_TYPEID.containing_type = _TASK
+DESCRIPTOR.message_types_by_name['Task'] = _TASK
+
+Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), dict(
+  DESCRIPTOR = _TASK,
+  __module__ = 'acquisition_task_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.Task)
+  ))
+_sym_db.RegisterMessage(Task)
+
+
+# @@protoc_insertion_point(module_scope)
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/any.proto b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/any.proto
new file mode 100644
index 0000000..b66ab09
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/any.proto
@@ -0,0 +1,11 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+syntax = "proto2";
+
+package ts_mon.proto;
+
+message Any {
+  optional string type_url = 1 [ctype=STRING_PIECE];
+  optional bytes value = 2 [ctype=CORD];
+}
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/any_pb2.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/any_pb2.py
new file mode 100644
index 0000000..ca0eca7
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/any_pb2.py
@@ -0,0 +1,78 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: any.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='any.proto',
+  package='ts_mon.proto',
+  serialized_pb=_b('\n\tany.proto\x12\x0cts_mon.proto\".\n\x03\x41ny\x12\x14\n\x08type_url\x18\x01 \x01(\tB\x02\x08\x02\x12\x11\n\x05value\x18\x02 \x01(\x0c\x42\x02\x08\x01')
+)
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+
+_ANY = _descriptor.Descriptor(
+  name='Any',
+  full_name='ts_mon.proto.Any',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type_url', full_name='ts_mon.proto.Any.type_url', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\010\002'))),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='ts_mon.proto.Any.value', index=1,
+      number=2, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\010\001'))),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=27,
+  serialized_end=73,
+)
+
+DESCRIPTOR.message_types_by_name['Any'] = _ANY
+
+Any = _reflection.GeneratedProtocolMessageType('Any', (_message.Message,), dict(
+  DESCRIPTOR = _ANY,
+  __module__ = 'any_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.Any)
+  ))
+_sym_db.RegisterMessage(Any)
+
+
+_ANY.fields_by_name['type_url'].has_options = True
+_ANY.fields_by_name['type_url']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\010\002'))
+_ANY.fields_by_name['value'].has_options = True
+_ANY.fields_by_name['value']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\010\001'))
+# @@protoc_insertion_point(module_scope)
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/metrics.proto b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/metrics.proto
new file mode 100644
index 0000000..8d23d15
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/metrics.proto
@@ -0,0 +1,128 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+syntax = "proto2";
+
+package ts_mon.proto;
+
+import "any.proto";
+import "timestamp.proto";
+import "acquisition_network_device.proto";
+import "acquisition_task.proto";
+
+message MetricsPayload {
+  repeated MetricsCollection metrics_collection = 1;
+}
+
+message MetricsCollection {
+  repeated MetricsDataSet metrics_data_set = 1;
+  oneof target_schema {
+    NetworkDevice network_device = 11;
+    Task task = 12;
+  }
+}
+
+message MetricsDataSet {
+  optional string metric_name = 1;
+  repeated MetricFieldDescriptor field_descriptor = 2;
+  optional StreamKind stream_kind = 3;
+  optional ValueType value_type = 4;
+  optional string description = 5;
+  optional Annotations annotations = 6;
+  repeated MetricsData data = 7;
+  message MetricFieldDescriptor {
+    optional string name = 1;
+
+    optional FieldType field_type = 2;
+    enum FieldType {
+      STRING = 0;
+      INT64 = 1;
+      BOOL = 2;
+    }
+  }
+}
+
+message MetricsData {
+  oneof value {
+    bool bool_value = 1;
+    string string_value = 2;
+    int64 int64_value = 3;
+    double double_value = 4;
+    Distribution distribution_value = 5;
+  }
+
+  repeated MetricField field = 6;
+  message MetricField {
+    optional string name = 1;
+
+    oneof value {
+      string string_value = 2;
+      int64 int64_value = 3;
+      bool bool_value = 4;
+    }
+  }
+
+  optional Timestamp start_timestamp = 7;
+  optional Timestamp end_timestamp = 8;
+
+  message Distribution {
+    optional int64 count = 1;
+    optional double mean = 2;
+    optional double sum_of_squared_deviation = 3;
+    optional double minimum = 4;
+    optional double maximum = 5;
+
+    oneof bucket_options {
+      LinearOptions linear_buckets = 6;
+      ExponentialOptions exponential_buckets = 7;
+      ExplicitOptions explicit_buckets = 8;
+    }
+
+    message LinearOptions {
+      optional int32 num_finite_buckets = 1;
+      optional double width = 2;
+      optional double offset = 3;
+    }
+
+    message ExponentialOptions {
+      optional int32 num_finite_buckets = 1;
+      optional double growth_factor = 2;
+      optional double scale = 3;
+    }
+
+    message ExplicitOptions {
+      repeated double bound = 1 [packed = true];
+    }
+
+    repeated int64 bucket_count = 9 [packed = true];
+
+    repeated Exemplar exemplar = 10;
+
+    message Exemplar {
+      optional double value = 1;
+      optional Timestamp timestamp = 2;
+      repeated Any attachment = 3;
+    }
+  }
+}
+
+message Annotations {
+  optional string unit = 1;
+  optional bool timestamp = 2;
+  optional string deprecation = 3;
+  repeated Any annotation = 4;
+}
+
+enum StreamKind {
+  GAUGE = 0;
+  CUMULATIVE = 1;
+  DELTA = 2;
+}
+
+enum ValueType {
+  BOOL = 0;
+  STRING = 1;
+  INT64 = 2;
+  DOUBLE = 3;
+  DISTRIBUTION = 4;
+}
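
(Illustrative note, not applied by this change: a minimal sketch of assembling one payload that matches the schema above, with a single cumulative int64 point for a task target. The import path assumes this patch's layout, and the Timestamp message from timestamp.proto is assumed to expose a seconds field as in upstream ts_mon.)

import time

from autotest_lib.utils.frozen_chromite.third_party.infra_libs.ts_mon.protos import metrics_pb2

payload = metrics_pb2.MetricsPayload()
collection = payload.metrics_collection.add()
collection.task.service_name = 'autotest'   # setting a field selects "task" in the target_schema oneof
collection.task.job_name = 'example-job'

data_set = collection.metrics_data_set.add()
data_set.metric_name = '/chrome/infra/example/count'
data_set.stream_kind = metrics_pb2.CUMULATIVE
data_set.value_type = metrics_pb2.INT64

point = data_set.data.add()
point.int64_value = 1
now = int(time.time())
point.start_timestamp.seconds = now          # assumed Timestamp.seconds field
point.end_timestamp.seconds = now

wire_bytes = payload.SerializeToString()
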
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/metrics_pb2.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/metrics_pb2.py
new file mode 100644
index 0000000..39cfa46
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/metrics_pb2.py
@@ -0,0 +1,926 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: metrics.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from . import any_pb2
+from . import timestamp_pb2
+from . import acquisition_network_device_pb2
+from . import acquisition_task_pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='metrics.proto',
+  package='ts_mon.proto',
+  serialized_pb=_b('\n\rmetrics.proto\x12\x0cts_mon.proto\x1a\tany.proto\x1a\x0ftimestamp.proto\x1a acquisition_network_device.proto\x1a\x16\x61\x63quisition_task.proto\"M\n\x0eMetricsPayload\x12;\n\x12metrics_collection\x18\x01 \x03(\x0b\x32\x1f.ts_mon.proto.MetricsCollection\"\xb7\x01\n\x11MetricsCollection\x12\x36\n\x10metrics_data_set\x18\x01 \x03(\x0b\x32\x1c.ts_mon.proto.MetricsDataSet\x12\x35\n\x0enetwork_device\x18\x0b \x01(\x0b\x32\x1b.ts_mon.proto.NetworkDeviceH\x00\x12\"\n\x04task\x18\x0c \x01(\x0b\x32\x12.ts_mon.proto.TaskH\x00\x42\x0f\n\rtarget_schema\"\xe5\x03\n\x0eMetricsDataSet\x12\x13\n\x0bmetric_name\x18\x01 \x01(\t\x12L\n\x10\x66ield_descriptor\x18\x02 \x03(\x0b\x32\x32.ts_mon.proto.MetricsDataSet.MetricFieldDescriptor\x12-\n\x0bstream_kind\x18\x03 \x01(\x0e\x32\x18.ts_mon.proto.StreamKind\x12+\n\nvalue_type\x18\x04 \x01(\x0e\x32\x17.ts_mon.proto.ValueType\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\x12.\n\x0b\x61nnotations\x18\x06 \x01(\x0b\x32\x19.ts_mon.proto.Annotations\x12\'\n\x04\x64\x61ta\x18\x07 \x03(\x0b\x32\x19.ts_mon.proto.MetricsData\x1a\xa5\x01\n\x15MetricFieldDescriptor\x12\x0c\n\x04name\x18\x01 \x01(\t\x12P\n\nfield_type\x18\x02 \x01(\x0e\x32<.ts_mon.proto.MetricsDataSet.MetricFieldDescriptor.FieldType\",\n\tFieldType\x12\n\n\x06STRING\x10\x00\x12\t\n\x05INT64\x10\x01\x12\x08\n\x04\x42OOL\x10\x02\"\xd3\t\n\x0bMetricsData\x12\x14\n\nbool_value\x18\x01 \x01(\x08H\x00\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0bint64_value\x18\x03 \x01(\x03H\x00\x12\x16\n\x0c\x64ouble_value\x18\x04 \x01(\x01H\x00\x12\x44\n\x12\x64istribution_value\x18\x05 \x01(\x0b\x32&.ts_mon.proto.MetricsData.DistributionH\x00\x12\x34\n\x05\x66ield\x18\x06 \x03(\x0b\x32%.ts_mon.proto.MetricsData.MetricField\x12\x30\n\x0fstart_timestamp\x18\x07 \x01(\x0b\x32\x17.ts_mon.proto.Timestamp\x12.\n\rend_timestamp\x18\x08 \x01(\x0b\x32\x17.ts_mon.proto.Timestamp\x1ai\n\x0bMetricField\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x16\n\x0cstring_value\x18\x02 \x01(\tH\x00\x12\x15\n\x0bint64_value\x18\x03 \x01(\x03H\x00\x12\x14\n\nbool_value\x18\x04 \x01(\x08H\x00\x42\x07\n\x05value\x1a\x94\x06\n\x0c\x44istribution\x12\r\n\x05\x63ount\x18\x01 \x01(\x03\x12\x0c\n\x04mean\x18\x02 \x01(\x01\x12 \n\x18sum_of_squared_deviation\x18\x03 \x01(\x01\x12\x0f\n\x07minimum\x18\x04 \x01(\x01\x12\x0f\n\x07maximum\x18\x05 \x01(\x01\x12N\n\x0elinear_buckets\x18\x06 \x01(\x0b\x32\x34.ts_mon.proto.MetricsData.Distribution.LinearOptionsH\x00\x12X\n\x13\x65xponential_buckets\x18\x07 \x01(\x0b\x32\x39.ts_mon.proto.MetricsData.Distribution.ExponentialOptionsH\x00\x12R\n\x10\x65xplicit_buckets\x18\x08 \x01(\x0b\x32\x36.ts_mon.proto.MetricsData.Distribution.ExplicitOptionsH\x00\x12\x18\n\x0c\x62ucket_count\x18\t \x03(\x03\x42\x02\x10\x01\x12\x41\n\x08\x65xemplar\x18\n \x03(\x0b\x32/.ts_mon.proto.MetricsData.Distribution.Exemplar\x1aJ\n\rLinearOptions\x12\x1a\n\x12num_finite_buckets\x18\x01 \x01(\x05\x12\r\n\x05width\x18\x02 \x01(\x01\x12\x0e\n\x06offset\x18\x03 \x01(\x01\x1aV\n\x12\x45xponentialOptions\x12\x1a\n\x12num_finite_buckets\x18\x01 \x01(\x05\x12\x15\n\rgrowth_factor\x18\x02 \x01(\x01\x12\r\n\x05scale\x18\x03 \x01(\x01\x1a$\n\x0f\x45xplicitOptions\x12\x11\n\x05\x62ound\x18\x01 \x03(\x01\x42\x02\x10\x01\x1al\n\x08\x45xemplar\x12\r\n\x05value\x18\x01 \x01(\x01\x12*\n\ttimestamp\x18\x02 \x01(\x0b\x32\x17.ts_mon.proto.Timestamp\x12%\n\nattachment\x18\x03 \x03(\x0b\x32\x11.ts_mon.proto.AnyB\x10\n\x0e\x62ucket_optionsB\x07\n\x05value\"j\n\x0b\x41nnotations\x12\x0c\n\x04unit\x18\x01 \x01(\t\x12\x11\n\ttimestamp\x18\x02 \x01(\x08\x12\x13\n\x0b\x64\x65precation\x18\x03 \x01(\t\x12%\n\nannotation\x18\x04 \x03(\x0b\x32\x11.ts_mon.proto.Any*2\n\nStreamKind\x12\t\n\x05GAUGE\x10\x00\x12\x0e\n\nCUMULATIVE\x10\x01\x12\t\n\x05\x44\x45LTA\x10\x02*J\n\tValueType\x12\x08\n\x04\x42OOL\x10\x00\x12\n\n\x06STRING\x10\x01\x12\t\n\x05INT64\x10\x02\x12\n\n\x06\x44OUBLE\x10\x03\x12\x10\n\x0c\x44ISTRIBUTION\x10\x04')
+  ,
+  dependencies=[any_pb2.DESCRIPTOR,timestamp_pb2.DESCRIPTOR,acquisition_network_device_pb2.DESCRIPTOR,acquisition_task_pb2.DESCRIPTOR,])
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+_STREAMKIND = _descriptor.EnumDescriptor(
+  name='StreamKind',
+  full_name='ts_mon.proto.StreamKind',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='GAUGE', index=0, number=0,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CUMULATIVE', index=1, number=1,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DELTA', index=2, number=2,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=2216,
+  serialized_end=2266,
+)
+_sym_db.RegisterEnumDescriptor(_STREAMKIND)
+
+StreamKind = enum_type_wrapper.EnumTypeWrapper(_STREAMKIND)
+_VALUETYPE = _descriptor.EnumDescriptor(
+  name='ValueType',
+  full_name='ts_mon.proto.ValueType',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='BOOL', index=0, number=0,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='STRING', index=1, number=1,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='INT64', index=2, number=2,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DOUBLE', index=3, number=3,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DISTRIBUTION', index=4, number=4,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=2268,
+  serialized_end=2342,
+)
+_sym_db.RegisterEnumDescriptor(_VALUETYPE)
+
+ValueType = enum_type_wrapper.EnumTypeWrapper(_VALUETYPE)
+GAUGE = 0
+CUMULATIVE = 1
+DELTA = 2
+BOOL = 0
+STRING = 1
+INT64 = 2
+DOUBLE = 3
+DISTRIBUTION = 4
+
+
+_METRICSDATASET_METRICFIELDDESCRIPTOR_FIELDTYPE = _descriptor.EnumDescriptor(
+  name='FieldType',
+  full_name='ts_mon.proto.MetricsDataSet.MetricFieldDescriptor.FieldType',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='STRING', index=0, number=0,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='INT64', index=1, number=1,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='BOOL', index=2, number=2,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=824,
+  serialized_end=868,
+)
+_sym_db.RegisterEnumDescriptor(_METRICSDATASET_METRICFIELDDESCRIPTOR_FIELDTYPE)
+
+
+_METRICSPAYLOAD = _descriptor.Descriptor(
+  name='MetricsPayload',
+  full_name='ts_mon.proto.MetricsPayload',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='metrics_collection', full_name='ts_mon.proto.MetricsPayload.metrics_collection', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=117,
+  serialized_end=194,
+)
+
+
+_METRICSCOLLECTION = _descriptor.Descriptor(
+  name='MetricsCollection',
+  full_name='ts_mon.proto.MetricsCollection',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='metrics_data_set', full_name='ts_mon.proto.MetricsCollection.metrics_data_set', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='network_device', full_name='ts_mon.proto.MetricsCollection.network_device', index=1,
+      number=11, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='task', full_name='ts_mon.proto.MetricsCollection.task', index=2,
+      number=12, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='target_schema', full_name='ts_mon.proto.MetricsCollection.target_schema',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=197,
+  serialized_end=380,
+)
+
+
+_METRICSDATASET_METRICFIELDDESCRIPTOR = _descriptor.Descriptor(
+  name='MetricFieldDescriptor',
+  full_name='ts_mon.proto.MetricsDataSet.MetricFieldDescriptor',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='ts_mon.proto.MetricsDataSet.MetricFieldDescriptor.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='field_type', full_name='ts_mon.proto.MetricsDataSet.MetricFieldDescriptor.field_type', index=1,
+      number=2, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+    _METRICSDATASET_METRICFIELDDESCRIPTOR_FIELDTYPE,
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=703,
+  serialized_end=868,
+)
+
+_METRICSDATASET = _descriptor.Descriptor(
+  name='MetricsDataSet',
+  full_name='ts_mon.proto.MetricsDataSet',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='metric_name', full_name='ts_mon.proto.MetricsDataSet.metric_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='field_descriptor', full_name='ts_mon.proto.MetricsDataSet.field_descriptor', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='stream_kind', full_name='ts_mon.proto.MetricsDataSet.stream_kind', index=2,
+      number=3, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='value_type', full_name='ts_mon.proto.MetricsDataSet.value_type', index=3,
+      number=4, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='description', full_name='ts_mon.proto.MetricsDataSet.description', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='annotations', full_name='ts_mon.proto.MetricsDataSet.annotations', index=5,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='data', full_name='ts_mon.proto.MetricsDataSet.data', index=6,
+      number=7, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_METRICSDATASET_METRICFIELDDESCRIPTOR, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=383,
+  serialized_end=868,
+)
+
+
+_METRICSDATA_METRICFIELD = _descriptor.Descriptor(
+  name='MetricField',
+  full_name='ts_mon.proto.MetricsData.MetricField',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='ts_mon.proto.MetricsData.MetricField.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='string_value', full_name='ts_mon.proto.MetricsData.MetricField.string_value', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='int64_value', full_name='ts_mon.proto.MetricsData.MetricField.int64_value', index=2,
+      number=3, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bool_value', full_name='ts_mon.proto.MetricsData.MetricField.bool_value', index=3,
+      number=4, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='value', full_name='ts_mon.proto.MetricsData.MetricField.value',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=1201,
+  serialized_end=1306,
+)
+
+_METRICSDATA_DISTRIBUTION_LINEAROPTIONS = _descriptor.Descriptor(
+  name='LinearOptions',
+  full_name='ts_mon.proto.MetricsData.Distribution.LinearOptions',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='num_finite_buckets', full_name='ts_mon.proto.MetricsData.Distribution.LinearOptions.num_finite_buckets', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='width', full_name='ts_mon.proto.MetricsData.Distribution.LinearOptions.width', index=1,
+      number=2, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='offset', full_name='ts_mon.proto.MetricsData.Distribution.LinearOptions.offset', index=2,
+      number=3, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1769,
+  serialized_end=1843,
+)
+
+_METRICSDATA_DISTRIBUTION_EXPONENTIALOPTIONS = _descriptor.Descriptor(
+  name='ExponentialOptions',
+  full_name='ts_mon.proto.MetricsData.Distribution.ExponentialOptions',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='num_finite_buckets', full_name='ts_mon.proto.MetricsData.Distribution.ExponentialOptions.num_finite_buckets', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='growth_factor', full_name='ts_mon.proto.MetricsData.Distribution.ExponentialOptions.growth_factor', index=1,
+      number=2, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='scale', full_name='ts_mon.proto.MetricsData.Distribution.ExponentialOptions.scale', index=2,
+      number=3, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1845,
+  serialized_end=1931,
+)
+
+_METRICSDATA_DISTRIBUTION_EXPLICITOPTIONS = _descriptor.Descriptor(
+  name='ExplicitOptions',
+  full_name='ts_mon.proto.MetricsData.Distribution.ExplicitOptions',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='bound', full_name='ts_mon.proto.MetricsData.Distribution.ExplicitOptions.bound', index=0,
+      number=1, type=1, cpp_type=5, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1933,
+  serialized_end=1969,
+)
+
+_METRICSDATA_DISTRIBUTION_EXEMPLAR = _descriptor.Descriptor(
+  name='Exemplar',
+  full_name='ts_mon.proto.MetricsData.Distribution.Exemplar',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='value', full_name='ts_mon.proto.MetricsData.Distribution.Exemplar.value', index=0,
+      number=1, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='timestamp', full_name='ts_mon.proto.MetricsData.Distribution.Exemplar.timestamp', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='attachment', full_name='ts_mon.proto.MetricsData.Distribution.Exemplar.attachment', index=2,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1971,
+  serialized_end=2079,
+)
+
+_METRICSDATA_DISTRIBUTION = _descriptor.Descriptor(
+  name='Distribution',
+  full_name='ts_mon.proto.MetricsData.Distribution',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='count', full_name='ts_mon.proto.MetricsData.Distribution.count', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='mean', full_name='ts_mon.proto.MetricsData.Distribution.mean', index=1,
+      number=2, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='sum_of_squared_deviation', full_name='ts_mon.proto.MetricsData.Distribution.sum_of_squared_deviation', index=2,
+      number=3, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='minimum', full_name='ts_mon.proto.MetricsData.Distribution.minimum', index=3,
+      number=4, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='maximum', full_name='ts_mon.proto.MetricsData.Distribution.maximum', index=4,
+      number=5, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='linear_buckets', full_name='ts_mon.proto.MetricsData.Distribution.linear_buckets', index=5,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='exponential_buckets', full_name='ts_mon.proto.MetricsData.Distribution.exponential_buckets', index=6,
+      number=7, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='explicit_buckets', full_name='ts_mon.proto.MetricsData.Distribution.explicit_buckets', index=7,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='bucket_count', full_name='ts_mon.proto.MetricsData.Distribution.bucket_count', index=8,
+      number=9, type=3, cpp_type=2, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
+    _descriptor.FieldDescriptor(
+      name='exemplar', full_name='ts_mon.proto.MetricsData.Distribution.exemplar', index=9,
+      number=10, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_METRICSDATA_DISTRIBUTION_LINEAROPTIONS, _METRICSDATA_DISTRIBUTION_EXPONENTIALOPTIONS, _METRICSDATA_DISTRIBUTION_EXPLICITOPTIONS, _METRICSDATA_DISTRIBUTION_EXEMPLAR, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='bucket_options', full_name='ts_mon.proto.MetricsData.Distribution.bucket_options',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=1309,
+  serialized_end=2097,
+)
+
+_METRICSDATA = _descriptor.Descriptor(
+  name='MetricsData',
+  full_name='ts_mon.proto.MetricsData',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='bool_value', full_name='ts_mon.proto.MetricsData.bool_value', index=0,
+      number=1, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='string_value', full_name='ts_mon.proto.MetricsData.string_value', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='int64_value', full_name='ts_mon.proto.MetricsData.int64_value', index=2,
+      number=3, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='double_value', full_name='ts_mon.proto.MetricsData.double_value', index=3,
+      number=4, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='distribution_value', full_name='ts_mon.proto.MetricsData.distribution_value', index=4,
+      number=5, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='field', full_name='ts_mon.proto.MetricsData.field', index=5,
+      number=6, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='start_timestamp', full_name='ts_mon.proto.MetricsData.start_timestamp', index=6,
+      number=7, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='end_timestamp', full_name='ts_mon.proto.MetricsData.end_timestamp', index=7,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[_METRICSDATA_METRICFIELD, _METRICSDATA_DISTRIBUTION, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+    _descriptor.OneofDescriptor(
+      name='value', full_name='ts_mon.proto.MetricsData.value',
+      index=0, containing_type=None, fields=[]),
+  ],
+  serialized_start=871,
+  serialized_end=2106,
+)
+
+
+_ANNOTATIONS = _descriptor.Descriptor(
+  name='Annotations',
+  full_name='ts_mon.proto.Annotations',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='unit', full_name='ts_mon.proto.Annotations.unit', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='timestamp', full_name='ts_mon.proto.Annotations.timestamp', index=1,
+      number=2, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='deprecation', full_name='ts_mon.proto.Annotations.deprecation', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='annotation', full_name='ts_mon.proto.Annotations.annotation', index=3,
+      number=4, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2108,
+  serialized_end=2214,
+)
+
+_METRICSPAYLOAD.fields_by_name['metrics_collection'].message_type = _METRICSCOLLECTION
+_METRICSCOLLECTION.fields_by_name['metrics_data_set'].message_type = _METRICSDATASET
+_METRICSCOLLECTION.fields_by_name['network_device'].message_type = acquisition_network_device_pb2._NETWORKDEVICE
+_METRICSCOLLECTION.fields_by_name['task'].message_type = acquisition_task_pb2._TASK
+_METRICSCOLLECTION.oneofs_by_name['target_schema'].fields.append(
+  _METRICSCOLLECTION.fields_by_name['network_device'])
+_METRICSCOLLECTION.fields_by_name['network_device'].containing_oneof = _METRICSCOLLECTION.oneofs_by_name['target_schema']
+_METRICSCOLLECTION.oneofs_by_name['target_schema'].fields.append(
+  _METRICSCOLLECTION.fields_by_name['task'])
+_METRICSCOLLECTION.fields_by_name['task'].containing_oneof = _METRICSCOLLECTION.oneofs_by_name['target_schema']
+_METRICSDATASET_METRICFIELDDESCRIPTOR.fields_by_name['field_type'].enum_type = _METRICSDATASET_METRICFIELDDESCRIPTOR_FIELDTYPE
+_METRICSDATASET_METRICFIELDDESCRIPTOR.containing_type = _METRICSDATASET
+_METRICSDATASET_METRICFIELDDESCRIPTOR_FIELDTYPE.containing_type = _METRICSDATASET_METRICFIELDDESCRIPTOR
+_METRICSDATASET.fields_by_name['field_descriptor'].message_type = _METRICSDATASET_METRICFIELDDESCRIPTOR
+_METRICSDATASET.fields_by_name['stream_kind'].enum_type = _STREAMKIND
+_METRICSDATASET.fields_by_name['value_type'].enum_type = _VALUETYPE
+_METRICSDATASET.fields_by_name['annotations'].message_type = _ANNOTATIONS
+_METRICSDATASET.fields_by_name['data'].message_type = _METRICSDATA
+_METRICSDATA_METRICFIELD.containing_type = _METRICSDATA
+_METRICSDATA_METRICFIELD.oneofs_by_name['value'].fields.append(
+  _METRICSDATA_METRICFIELD.fields_by_name['string_value'])
+_METRICSDATA_METRICFIELD.fields_by_name['string_value'].containing_oneof = _METRICSDATA_METRICFIELD.oneofs_by_name['value']
+_METRICSDATA_METRICFIELD.oneofs_by_name['value'].fields.append(
+  _METRICSDATA_METRICFIELD.fields_by_name['int64_value'])
+_METRICSDATA_METRICFIELD.fields_by_name['int64_value'].containing_oneof = _METRICSDATA_METRICFIELD.oneofs_by_name['value']
+_METRICSDATA_METRICFIELD.oneofs_by_name['value'].fields.append(
+  _METRICSDATA_METRICFIELD.fields_by_name['bool_value'])
+_METRICSDATA_METRICFIELD.fields_by_name['bool_value'].containing_oneof = _METRICSDATA_METRICFIELD.oneofs_by_name['value']
+_METRICSDATA_DISTRIBUTION_LINEAROPTIONS.containing_type = _METRICSDATA_DISTRIBUTION
+_METRICSDATA_DISTRIBUTION_EXPONENTIALOPTIONS.containing_type = _METRICSDATA_DISTRIBUTION
+_METRICSDATA_DISTRIBUTION_EXPLICITOPTIONS.containing_type = _METRICSDATA_DISTRIBUTION
+_METRICSDATA_DISTRIBUTION_EXEMPLAR.fields_by_name['timestamp'].message_type = timestamp_pb2._TIMESTAMP
+_METRICSDATA_DISTRIBUTION_EXEMPLAR.fields_by_name['attachment'].message_type = any_pb2._ANY
+_METRICSDATA_DISTRIBUTION_EXEMPLAR.containing_type = _METRICSDATA_DISTRIBUTION
+_METRICSDATA_DISTRIBUTION.fields_by_name['linear_buckets'].message_type = _METRICSDATA_DISTRIBUTION_LINEAROPTIONS
+_METRICSDATA_DISTRIBUTION.fields_by_name['exponential_buckets'].message_type = _METRICSDATA_DISTRIBUTION_EXPONENTIALOPTIONS
+_METRICSDATA_DISTRIBUTION.fields_by_name['explicit_buckets'].message_type = _METRICSDATA_DISTRIBUTION_EXPLICITOPTIONS
+_METRICSDATA_DISTRIBUTION.fields_by_name['exemplar'].message_type = _METRICSDATA_DISTRIBUTION_EXEMPLAR
+_METRICSDATA_DISTRIBUTION.containing_type = _METRICSDATA
+_METRICSDATA_DISTRIBUTION.oneofs_by_name['bucket_options'].fields.append(
+  _METRICSDATA_DISTRIBUTION.fields_by_name['linear_buckets'])
+_METRICSDATA_DISTRIBUTION.fields_by_name['linear_buckets'].containing_oneof = _METRICSDATA_DISTRIBUTION.oneofs_by_name['bucket_options']
+_METRICSDATA_DISTRIBUTION.oneofs_by_name['bucket_options'].fields.append(
+  _METRICSDATA_DISTRIBUTION.fields_by_name['exponential_buckets'])
+_METRICSDATA_DISTRIBUTION.fields_by_name['exponential_buckets'].containing_oneof = _METRICSDATA_DISTRIBUTION.oneofs_by_name['bucket_options']
+_METRICSDATA_DISTRIBUTION.oneofs_by_name['bucket_options'].fields.append(
+  _METRICSDATA_DISTRIBUTION.fields_by_name['explicit_buckets'])
+_METRICSDATA_DISTRIBUTION.fields_by_name['explicit_buckets'].containing_oneof = _METRICSDATA_DISTRIBUTION.oneofs_by_name['bucket_options']
+_METRICSDATA.fields_by_name['distribution_value'].message_type = _METRICSDATA_DISTRIBUTION
+_METRICSDATA.fields_by_name['field'].message_type = _METRICSDATA_METRICFIELD
+_METRICSDATA.fields_by_name['start_timestamp'].message_type = timestamp_pb2._TIMESTAMP
+_METRICSDATA.fields_by_name['end_timestamp'].message_type = timestamp_pb2._TIMESTAMP
+_METRICSDATA.oneofs_by_name['value'].fields.append(
+  _METRICSDATA.fields_by_name['bool_value'])
+_METRICSDATA.fields_by_name['bool_value'].containing_oneof = _METRICSDATA.oneofs_by_name['value']
+_METRICSDATA.oneofs_by_name['value'].fields.append(
+  _METRICSDATA.fields_by_name['string_value'])
+_METRICSDATA.fields_by_name['string_value'].containing_oneof = _METRICSDATA.oneofs_by_name['value']
+_METRICSDATA.oneofs_by_name['value'].fields.append(
+  _METRICSDATA.fields_by_name['int64_value'])
+_METRICSDATA.fields_by_name['int64_value'].containing_oneof = _METRICSDATA.oneofs_by_name['value']
+_METRICSDATA.oneofs_by_name['value'].fields.append(
+  _METRICSDATA.fields_by_name['double_value'])
+_METRICSDATA.fields_by_name['double_value'].containing_oneof = _METRICSDATA.oneofs_by_name['value']
+_METRICSDATA.oneofs_by_name['value'].fields.append(
+  _METRICSDATA.fields_by_name['distribution_value'])
+_METRICSDATA.fields_by_name['distribution_value'].containing_oneof = _METRICSDATA.oneofs_by_name['value']
+_ANNOTATIONS.fields_by_name['annotation'].message_type = any_pb2._ANY
+DESCRIPTOR.message_types_by_name['MetricsPayload'] = _METRICSPAYLOAD
+DESCRIPTOR.message_types_by_name['MetricsCollection'] = _METRICSCOLLECTION
+DESCRIPTOR.message_types_by_name['MetricsDataSet'] = _METRICSDATASET
+DESCRIPTOR.message_types_by_name['MetricsData'] = _METRICSDATA
+DESCRIPTOR.message_types_by_name['Annotations'] = _ANNOTATIONS
+DESCRIPTOR.enum_types_by_name['StreamKind'] = _STREAMKIND
+DESCRIPTOR.enum_types_by_name['ValueType'] = _VALUETYPE
+
+MetricsPayload = _reflection.GeneratedProtocolMessageType('MetricsPayload', (_message.Message,), dict(
+  DESCRIPTOR = _METRICSPAYLOAD,
+  __module__ = 'metrics_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsPayload)
+  ))
+_sym_db.RegisterMessage(MetricsPayload)
+
+MetricsCollection = _reflection.GeneratedProtocolMessageType('MetricsCollection', (_message.Message,), dict(
+  DESCRIPTOR = _METRICSCOLLECTION,
+  __module__ = 'metrics_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsCollection)
+  ))
+_sym_db.RegisterMessage(MetricsCollection)
+
+MetricsDataSet = _reflection.GeneratedProtocolMessageType('MetricsDataSet', (_message.Message,), dict(
+
+  MetricFieldDescriptor = _reflection.GeneratedProtocolMessageType('MetricFieldDescriptor', (_message.Message,), dict(
+    DESCRIPTOR = _METRICSDATASET_METRICFIELDDESCRIPTOR,
+    __module__ = 'metrics_pb2'
+    # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsDataSet.MetricFieldDescriptor)
+    ))
+  ,
+  DESCRIPTOR = _METRICSDATASET,
+  __module__ = 'metrics_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsDataSet)
+  ))
+_sym_db.RegisterMessage(MetricsDataSet)
+_sym_db.RegisterMessage(MetricsDataSet.MetricFieldDescriptor)
+
+MetricsData = _reflection.GeneratedProtocolMessageType('MetricsData', (_message.Message,), dict(
+
+  MetricField = _reflection.GeneratedProtocolMessageType('MetricField', (_message.Message,), dict(
+    DESCRIPTOR = _METRICSDATA_METRICFIELD,
+    __module__ = 'metrics_pb2'
+    # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsData.MetricField)
+    ))
+  ,
+
+  Distribution = _reflection.GeneratedProtocolMessageType('Distribution', (_message.Message,), dict(
+
+    LinearOptions = _reflection.GeneratedProtocolMessageType('LinearOptions', (_message.Message,), dict(
+      DESCRIPTOR = _METRICSDATA_DISTRIBUTION_LINEAROPTIONS,
+      __module__ = 'metrics_pb2'
+      # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsData.Distribution.LinearOptions)
+      ))
+    ,
+
+    ExponentialOptions = _reflection.GeneratedProtocolMessageType('ExponentialOptions', (_message.Message,), dict(
+      DESCRIPTOR = _METRICSDATA_DISTRIBUTION_EXPONENTIALOPTIONS,
+      __module__ = 'metrics_pb2'
+      # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsData.Distribution.ExponentialOptions)
+      ))
+    ,
+
+    ExplicitOptions = _reflection.GeneratedProtocolMessageType('ExplicitOptions', (_message.Message,), dict(
+      DESCRIPTOR = _METRICSDATA_DISTRIBUTION_EXPLICITOPTIONS,
+      __module__ = 'metrics_pb2'
+      # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsData.Distribution.ExplicitOptions)
+      ))
+    ,
+
+    Exemplar = _reflection.GeneratedProtocolMessageType('Exemplar', (_message.Message,), dict(
+      DESCRIPTOR = _METRICSDATA_DISTRIBUTION_EXEMPLAR,
+      __module__ = 'metrics_pb2'
+      # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsData.Distribution.Exemplar)
+      ))
+    ,
+    DESCRIPTOR = _METRICSDATA_DISTRIBUTION,
+    __module__ = 'metrics_pb2'
+    # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsData.Distribution)
+    ))
+  ,
+  DESCRIPTOR = _METRICSDATA,
+  __module__ = 'metrics_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.MetricsData)
+  ))
+_sym_db.RegisterMessage(MetricsData)
+_sym_db.RegisterMessage(MetricsData.MetricField)
+_sym_db.RegisterMessage(MetricsData.Distribution)
+_sym_db.RegisterMessage(MetricsData.Distribution.LinearOptions)
+_sym_db.RegisterMessage(MetricsData.Distribution.ExponentialOptions)
+_sym_db.RegisterMessage(MetricsData.Distribution.ExplicitOptions)
+_sym_db.RegisterMessage(MetricsData.Distribution.Exemplar)
+
+Annotations = _reflection.GeneratedProtocolMessageType('Annotations', (_message.Message,), dict(
+  DESCRIPTOR = _ANNOTATIONS,
+  __module__ = 'metrics_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.Annotations)
+  ))
+_sym_db.RegisterMessage(Annotations)
+
+
+_METRICSDATA_DISTRIBUTION_EXPLICITOPTIONS.fields_by_name['bound'].has_options = True
+_METRICSDATA_DISTRIBUTION_EXPLICITOPTIONS.fields_by_name['bound']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
+_METRICSDATA_DISTRIBUTION.fields_by_name['bucket_count'].has_options = True
+_METRICSDATA_DISTRIBUTION.fields_by_name['bucket_count']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
+# @@protoc_insertion_point(module_scope)
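
A minimal usage sketch for the generated classes above, assuming the protos package
is importable as infra_libs.ts_mon.protos (the usual third_party sys.path setup) and
that metrics_collection, metrics_data_set, and data are repeated fields as in
upstream ts_mon:

    from infra_libs.ts_mon.protos import metrics_pb2

    payload = metrics_pb2.MetricsPayload()
    collection = payload.metrics_collection.add()   # repeated MetricsCollection
    data_set = collection.metrics_data_set.add()    # repeated MetricsDataSet
    point = data_set.data.add()                     # repeated MetricsData
    point.int64_value = 42     # sets the int64 member of the 'value' oneof
    wire_bytes = payload.SerializeToString()
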
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/timestamp.proto b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/timestamp.proto
new file mode 100644
index 0000000..83ab8ec
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/timestamp.proto
@@ -0,0 +1,11 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+syntax = "proto2";
+
+package ts_mon.proto;
+
+message Timestamp {
+  optional int64 seconds = 1;
+  optional int32 nanos = 2;
+}
diff --git a/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/timestamp_pb2.py b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/timestamp_pb2.py
new file mode 100644
index 0000000..10bb23d
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/ts_mon/protos/timestamp_pb2.py
@@ -0,0 +1,74 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: timestamp.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='timestamp.proto',
+  package='ts_mon.proto',
+  serialized_pb=_b('\n\x0ftimestamp.proto\x12\x0cts_mon.proto\"+\n\tTimestamp\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05')
+)
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+
+
+_TIMESTAMP = _descriptor.Descriptor(
+  name='Timestamp',
+  full_name='ts_mon.proto.Timestamp',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='seconds', full_name='ts_mon.proto.Timestamp.seconds', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+    _descriptor.FieldDescriptor(
+      name='nanos', full_name='ts_mon.proto.Timestamp.nanos', index=1,
+      number=2, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=33,
+  serialized_end=76,
+)
+
+DESCRIPTOR.message_types_by_name['Timestamp'] = _TIMESTAMP
+
+Timestamp = _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), dict(
+  DESCRIPTOR = _TIMESTAMP,
+  __module__ = 'timestamp_pb2'
+  # @@protoc_insertion_point(class_scope:ts_mon.proto.Timestamp)
+  ))
+_sym_db.RegisterMessage(Timestamp)
+
+
+# @@protoc_insertion_point(module_scope)
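
A minimal sketch for the generated Timestamp message, under the same import-path
assumption as above:

    from infra_libs.ts_mon.protos import timestamp_pb2

    ts = timestamp_pb2.Timestamp()
    ts.seconds = 1598486400        # whole seconds
    ts.nanos = 250000000           # fractional part, in nanoseconds
    data = ts.SerializeToString()
    assert timestamp_pb2.Timestamp.FromString(data) == ts
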
diff --git a/utils/frozen_chromite/third_party/infra_libs/utils.py b/utils/frozen_chromite/third_party/infra_libs/utils.py
new file mode 100644
index 0000000..5e59872
--- /dev/null
+++ b/utils/frozen_chromite/third_party/infra_libs/utils.py
@@ -0,0 +1,49 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Miscellaneous utility functions."""
+
+import contextlib
+import shutil
+import sys
+import tempfile
+
+
+# We're trying to be compatible with Python3 tempfile.TemporaryDirectory
+# context manager here. And they used 'dir' as a keyword argument.
+# pylint: disable=redefined-builtin
[email protected]
+def temporary_directory(suffix="", prefix="tmp", dir=None,
+                        keep_directory=False):
+  """Create and return a temporary directory.  This has the same
+  behavior as mkdtemp but can be used as a context manager.  For
+  example:
+
+    with temporary_directory() as tmpdir:
+      ...
+
+  Upon exiting the context, the directory and everything contained
+  in it are removed.
+
+  Args:
+    suffix, prefix, dir: same arguments as for tempfile.mkdtemp.
+    keep_directory (bool): if True, do not delete the temporary directory
+      when exiting. Useful for debugging.
+
+  Returns:
+    tempdir (str): full path to the temporary directory.
+  """
+  tempdir = None  # Handle mkdtemp raising an exception
+  try:
+    tempdir = tempfile.mkdtemp(suffix, prefix, dir)
+    yield tempdir
+
+  finally:
+    if tempdir and not keep_directory:  # pragma: no branch
+      try:
+        # TODO(pgervais,496347) Make this work reliably on Windows.
+        shutil.rmtree(tempdir, ignore_errors=True)
+      except OSError as ex:  # pragma: no cover
+        print >> sys.stderr, (
+          "ERROR: {!r} while cleaning up {!r}".format(ex, tempdir))
diff --git a/utils/frozen_chromite/third_party/mock.py b/utils/frozen_chromite/third_party/mock.py
new file mode 100644
index 0000000..509c5a4
--- /dev/null
+++ b/utils/frozen_chromite/third_party/mock.py
@@ -0,0 +1,2376 @@
+# mock.py
+# Test tools for mocking and patching.
+# Copyright (C) 2007-2012 Michael Foord & the mock team
+# E-mail: fuzzyman AT voidspace DOT org DOT uk
+
+# mock 1.0
+# http://www.voidspace.org.uk/python/mock/
+
+# Released subject to the BSD License
+# Please see http://www.voidspace.org.uk/python/license.shtml
+
+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
+# Comments, suggestions and bug reports welcome.
+
+
+__all__ = (
+    'Mock',
+    'MagicMock',
+    'patch',
+    'sentinel',
+    'DEFAULT',
+    'ANY',
+    'call',
+    'create_autospec',
+    'FILTER_DIR',
+    'NonCallableMock',
+    'NonCallableMagicMock',
+    'mock_open',
+    'PropertyMock',
+)
+
+
+__version__ = '1.0.1'
+
+
+import pprint
+import sys
+
+try:
+    import inspect
+except ImportError:
+    # for alternative platforms that
+    # may not have inspect
+    inspect = None
+
+try:
+    from functools import wraps as original_wraps
+except ImportError:
+    # Python 2.4 compatibility
+    def wraps(original):
+        def inner(f):
+            f.__name__ = original.__name__
+            f.__doc__ = original.__doc__
+            f.__module__ = original.__module__
+            f.__wrapped__ = original
+            return f
+        return inner
+else:
+    if sys.version_info[:2] >= (3, 3):
+        wraps = original_wraps
+    else:
+        def wraps(func):
+            def inner(f):
+                f = original_wraps(func)(f)
+                f.__wrapped__ = func
+                return f
+            return inner
+
+try:
+    unicode
+except NameError:
+    # Python 3
+    basestring = unicode = str
+
+try:
+    long
+except NameError:
+    # Python 3
+    long = int
+
+try:
+    BaseException
+except NameError:
+    # Python 2.4 compatibility
+    BaseException = Exception
+
+try:
+    next
+except NameError:
+    def next(obj):
+        return obj.next()
+
+
+BaseExceptions = (BaseException,)
+if 'java' in sys.platform:
+    # jython
+    import java
+    BaseExceptions = (BaseException, java.lang.Throwable)
+
+try:
+    _isidentifier = str.isidentifier
+except AttributeError:
+    # Python 2.X
+    import keyword
+    import re
+    regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
+    def _isidentifier(string):
+        if string in keyword.kwlist:
+            return False
+        return regex.match(string)
+
+
+inPy3k = sys.version_info[0] == 3
+
+# Needed to work around Python 3 bug where use of "super" interferes with
+# defining __class__ as a descriptor
+_super = super
+
+self = 'im_self'
+builtin = '__builtin__'
+if inPy3k:
+    self = '__self__'
+    builtin = 'builtins'
+
+FILTER_DIR = True
+
+
+def _is_instance_mock(obj):
+    # can't use isinstance on Mock objects because they override __class__
+    # The base class for all mocks is NonCallableMock
+    return issubclass(type(obj), NonCallableMock)
+
+
+def _is_exception(obj):
+    return (
+        isinstance(obj, BaseExceptions) or
+        isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
+    )
+
+
+class _slotted(object):
+    __slots__ = ['a']
+
+
+DescriptorTypes = (
+    type(_slotted.a),
+    property,
+)
+
+
+def _getsignature(func, skipfirst, instance=False):
+    if inspect is None:
+        raise ImportError('inspect module not available')
+
+    if isinstance(func, ClassTypes) and not instance:
+        try:
+            func = func.__init__
+        except AttributeError:
+            return
+        skipfirst = True
+    elif not isinstance(func, FunctionTypes):
+        # for classes where instance is True we end up here too
+        try:
+            func = func.__call__
+        except AttributeError:
+            return
+
+    if inPy3k:
+        try:
+            argspec = inspect.getfullargspec(func)
+        except TypeError:
+            # C function / method, possibly inherited object().__init__
+            return
+        regargs, varargs, varkw, defaults, kwonly, kwonlydef, ann = argspec
+    else:
+        try:
+            regargs, varargs, varkwargs, defaults = inspect.getargspec(func)
+        except TypeError:
+            # C function / method, possibly inherited object().__init__
+            return
+
+    # instance methods and classmethods need to lose the self argument
+    if getattr(func, self, None) is not None:
+        regargs = regargs[1:]
+    if skipfirst:
+        # this condition and the above one are never both True - why?
+        regargs = regargs[1:]
+
+    if inPy3k:
+        signature = inspect.formatargspec(
+            regargs, varargs, varkw, defaults,
+            kwonly, kwonlydef, ann, formatvalue=lambda value: "")
+    else:
+        signature = inspect.formatargspec(
+            regargs, varargs, varkwargs, defaults,
+            formatvalue=lambda value: "")
+    return signature[1:-1], func
+
+
+def _check_signature(func, mock, skipfirst, instance=False):
+    if not _callable(func):
+        return
+
+    result = _getsignature(func, skipfirst, instance)
+    if result is None:
+        return
+    signature, func = result
+
+    # can't use self because "self" is common as an argument name
+    # unfortunately not even in the first place
+    src = "lambda _mock_self, %s: None" % signature
+    checksig = eval(src, {})
+    _copy_func_details(func, checksig)
+    type(mock)._mock_check_sig = checksig
+
+
+def _copy_func_details(func, funcopy):
+    funcopy.__name__ = func.__name__
+    funcopy.__doc__ = func.__doc__
+    #funcopy.__dict__.update(func.__dict__)
+    try:
+        funcopy.__module__ = func.__module__
+    except AttributeError:
+        pass
+    if not inPy3k:
+        funcopy.func_defaults = func.func_defaults
+        return
+    try:
+        funcopy.__defaults__ = func.__defaults__
+    except AttributeError:
+        pass
+    try:
+        funcopy.__kwdefaults__ = func.__kwdefaults__
+    except AttributeError:
+        pass
+
+
+def _callable(obj):
+    if isinstance(obj, (ClassTypes, staticmethod, classmethod)):
+        return True
+    if getattr(obj, '__call__', None) is not None:
+        return True
+    return False
+
+
+def _is_list(obj):
+    # checks for list or tuples
+    # XXXX badly named!
+    return type(obj) in (list, tuple)
+
+
+def _instance_callable(obj):
+    """Given an object, return True if the object is callable.
+    For classes, return True if instances would be callable."""
+    if not isinstance(obj, ClassTypes):
+        # already an instance
+        return getattr(obj, '__call__', None) is not None
+
+    klass = obj
+    # uses __bases__ instead of __mro__ so that we work with old style classes
+    if klass.__dict__.get('__call__') is not None:
+        return True
+
+    for base in klass.__bases__:
+        if _instance_callable(base):
+            return True
+    return False
+
+
+def _set_signature(mock, original, instance=False):
+    # creates a function with signature (*args, **kwargs) that delegates to a
+    # mock. It still does signature checking by calling a lambda with the same
+    # signature as the original.
+    if not _callable(original):
+        return
+
+    skipfirst = isinstance(original, ClassTypes)
+    result = _getsignature(original, skipfirst, instance)
+    if result is None:
+        # was a C function (e.g. object().__init__ ) that can't be mocked
+        return
+
+    signature, func = result
+
+    src = "lambda %s: None" % signature
+    checksig = eval(src, {})
+    _copy_func_details(func, checksig)
+
+    name = original.__name__
+    if not _isidentifier(name):
+        name = 'funcopy'
+    context = {'_checksig_': checksig, 'mock': mock}
+    src = """def %s(*args, **kwargs):
+    _checksig_(*args, **kwargs)
+    return mock(*args, **kwargs)""" % name
+    exec (src, context)
+    funcopy = context[name]
+    _setup_func(funcopy, mock)
+    return funcopy
+
+
+def _setup_func(funcopy, mock):
+    funcopy.mock = mock
+
+    # can't use isinstance with mocks
+    if not _is_instance_mock(mock):
+        return
+
+    def assert_called_with(*args, **kwargs):
+        return mock.assert_called_with(*args, **kwargs)
+    def assert_called_once_with(*args, **kwargs):
+        return mock.assert_called_once_with(*args, **kwargs)
+    def assert_has_calls(*args, **kwargs):
+        return mock.assert_has_calls(*args, **kwargs)
+    def assert_any_call(*args, **kwargs):
+        return mock.assert_any_call(*args, **kwargs)
+    def reset_mock():
+        funcopy.method_calls = _CallList()
+        funcopy.mock_calls = _CallList()
+        mock.reset_mock()
+        ret = funcopy.return_value
+        if _is_instance_mock(ret) and not ret is mock:
+            ret.reset_mock()
+
+    funcopy.called = False
+    funcopy.call_count = 0
+    funcopy.call_args = None
+    funcopy.call_args_list = _CallList()
+    funcopy.method_calls = _CallList()
+    funcopy.mock_calls = _CallList()
+
+    funcopy.return_value = mock.return_value
+    funcopy.side_effect = mock.side_effect
+    funcopy._mock_children = mock._mock_children
+
+    funcopy.assert_called_with = assert_called_with
+    funcopy.assert_called_once_with = assert_called_once_with
+    funcopy.assert_has_calls = assert_has_calls
+    funcopy.assert_any_call = assert_any_call
+    funcopy.reset_mock = reset_mock
+
+    mock._mock_delegate = funcopy
+
+
+def _is_magic(name):
+    return '__%s__' % name[2:-2] == name
+
+
+class _SentinelObject(object):
+    "A unique, named, sentinel object."
+    def __init__(self, name):
+        self.name = name
+
+    def __repr__(self):
+        return 'sentinel.%s' % self.name
+
+
+class _Sentinel(object):
+    """Access attributes to return a named object, usable as a sentinel."""
+    def __init__(self):
+        self._sentinels = {}
+
+    def __getattr__(self, name):
+        if name == '__bases__':
+            # Without this help(mock) raises an exception
+            raise AttributeError
+        return self._sentinels.setdefault(name, _SentinelObject(name))
+
+
+sentinel = _Sentinel()
+
+DEFAULT = sentinel.DEFAULT
+_missing = sentinel.MISSING
+_deleted = sentinel.DELETED
+
+
+class OldStyleClass:
+    pass
+ClassType = type(OldStyleClass)
+
+
+def _copy(value):
+    if type(value) in (dict, list, tuple, set):
+        return type(value)(value)
+    return value
+
+
+ClassTypes = (type,)
+if not inPy3k:
+    ClassTypes = (type, ClassType)
+
+_allowed_names = set(
+    [
+        'return_value', '_mock_return_value', 'side_effect',
+        '_mock_side_effect', '_mock_parent', '_mock_new_parent',
+        '_mock_name', '_mock_new_name'
+    ]
+)
+
+
+def _delegating_property(name):
+    _allowed_names.add(name)
+    _the_name = '_mock_' + name
+    def _get(self, name=name, _the_name=_the_name):
+        sig = self._mock_delegate
+        if sig is None:
+            return getattr(self, _the_name)
+        return getattr(sig, name)
+    def _set(self, value, name=name, _the_name=_the_name):
+        sig = self._mock_delegate
+        if sig is None:
+            self.__dict__[_the_name] = value
+        else:
+            setattr(sig, name, value)
+
+    return property(_get, _set)
+
+
+
+class _CallList(list):
+
+    def __contains__(self, value):
+        if not isinstance(value, list):
+            return list.__contains__(self, value)
+        len_value = len(value)
+        len_self = len(self)
+        if len_value > len_self:
+            return False
+
+        for i in range(0, len_self - len_value + 1):
+            sub_list = self[i:i+len_value]
+            if sub_list == value:
+                return True
+        return False
+
+    def __repr__(self):
+        return pprint.pformat(list(self))
+
+
+def _check_and_set_parent(parent, value, name, new_name):
+    if not _is_instance_mock(value):
+        return False
+    if ((value._mock_name or value._mock_new_name) or
+        (value._mock_parent is not None) or
+        (value._mock_new_parent is not None)):
+        return False
+
+    _parent = parent
+    while _parent is not None:
+        # setting a mock (value) as a child or return value of itself
+        # should not modify the mock
+        if _parent is value:
+            return False
+        _parent = _parent._mock_new_parent
+
+    if new_name:
+        value._mock_new_parent = parent
+        value._mock_new_name = new_name
+    if name:
+        value._mock_parent = parent
+        value._mock_name = name
+    return True
+
+
+
+class Base(object):
+    _mock_return_value = DEFAULT
+    _mock_side_effect = None
+    def __init__(self, *args, **kwargs):
+        pass
+
+
+
+class NonCallableMock(Base):
+    """A non-callable version of `Mock`"""
+
+    def __new__(cls, *args, **kw):
+        # every instance has its own class
+        # so we can create magic methods on the
+        # class without stomping on other mocks
+        new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
+        instance = object.__new__(new)
+        return instance
+
+
+    def __init__(
+            self, spec=None, wraps=None, name=None, spec_set=None,
+            parent=None, _spec_state=None, _new_name='', _new_parent=None,
+            **kwargs
+        ):
+        if _new_parent is None:
+            _new_parent = parent
+
+        __dict__ = self.__dict__
+        __dict__['_mock_parent'] = parent
+        __dict__['_mock_name'] = name
+        __dict__['_mock_new_name'] = _new_name
+        __dict__['_mock_new_parent'] = _new_parent
+
+        if spec_set is not None:
+            spec = spec_set
+            spec_set = True
+
+        self._mock_add_spec(spec, spec_set)
+
+        __dict__['_mock_children'] = {}
+        __dict__['_mock_wraps'] = wraps
+        __dict__['_mock_delegate'] = None
+
+        __dict__['_mock_called'] = False
+        __dict__['_mock_call_args'] = None
+        __dict__['_mock_call_count'] = 0
+        __dict__['_mock_call_args_list'] = _CallList()
+        __dict__['_mock_mock_calls'] = _CallList()
+
+        __dict__['method_calls'] = _CallList()
+
+        if kwargs:
+            self.configure_mock(**kwargs)
+
+        _super(NonCallableMock, self).__init__(
+            spec, wraps, name, spec_set, parent,
+            _spec_state
+        )
+
+
+    def attach_mock(self, mock, attribute):
+        """
+        Attach a mock as an attribute of this one, replacing its name and
+        parent. Calls to the attached mock will be recorded in the
+        `method_calls` and `mock_calls` attributes of this one."""
+        mock._mock_parent = None
+        mock._mock_new_parent = None
+        mock._mock_name = ''
+        mock._mock_new_name = None
+
+        setattr(self, attribute, mock)
+
+
+    def mock_add_spec(self, spec, spec_set=False):
+        """Add a spec to a mock. `spec` can either be an object or a
+        list of strings. Only attributes on the `spec` can be fetched as
+        attributes from the mock.
+
+        If `spec_set` is True then only attributes on the spec can be set."""
+        self._mock_add_spec(spec, spec_set)
+
+
+    def _mock_add_spec(self, spec, spec_set):
+        _spec_class = None
+
+        if spec is not None and not _is_list(spec):
+            if isinstance(spec, ClassTypes):
+                _spec_class = spec
+            else:
+                _spec_class = _get_class(spec)
+
+            spec = dir(spec)
+
+        __dict__ = self.__dict__
+        __dict__['_spec_class'] = _spec_class
+        __dict__['_spec_set'] = spec_set
+        __dict__['_mock_methods'] = spec
+
+
+    def __get_return_value(self):
+        ret = self._mock_return_value
+        if self._mock_delegate is not None:
+            ret = self._mock_delegate.return_value
+
+        if ret is DEFAULT:
+            ret = self._get_child_mock(
+                _new_parent=self, _new_name='()'
+            )
+            self.return_value = ret
+        return ret
+
+
+    def __set_return_value(self, value):
+        if self._mock_delegate is not None:
+            self._mock_delegate.return_value = value
+        else:
+            self._mock_return_value = value
+            _check_and_set_parent(self, value, None, '()')
+
+    __return_value_doc = "The value to be returned when the mock is called."
+    return_value = property(__get_return_value, __set_return_value,
+                            __return_value_doc)
+
+
+    @property
+    def __class__(self):
+        if self._spec_class is None:
+            return type(self)
+        return self._spec_class
+
+    called = _delegating_property('called')
+    call_count = _delegating_property('call_count')
+    call_args = _delegating_property('call_args')
+    call_args_list = _delegating_property('call_args_list')
+    mock_calls = _delegating_property('mock_calls')
+
+
+    def __get_side_effect(self):
+        sig = self._mock_delegate
+        if sig is None:
+            return self._mock_side_effect
+        return sig.side_effect
+
+    def __set_side_effect(self, value):
+        value = _try_iter(value)
+        sig = self._mock_delegate
+        if sig is None:
+            self._mock_side_effect = value
+        else:
+            sig.side_effect = value
+
+    side_effect = property(__get_side_effect, __set_side_effect)
+
+
+    def reset_mock(self):
+        "Restore the mock object to its initial state."
+        self.called = False
+        self.call_args = None
+        self.call_count = 0
+        self.mock_calls = _CallList()
+        self.call_args_list = _CallList()
+        self.method_calls = _CallList()
+
+        for child in self._mock_children.values():
+            if isinstance(child, _SpecState):
+                continue
+            child.reset_mock()
+
+        ret = self._mock_return_value
+        if _is_instance_mock(ret) and ret is not self:
+            ret.reset_mock()
+
+
+    def configure_mock(self, **kwargs):
+        """Set attributes on the mock through keyword arguments.
+
+        Attributes plus return values and side effects can be set on child
+        mocks using standard dot notation and unpacking a dictionary in the
+        method call:
+
+        >>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
+        >>> mock.configure_mock(**attrs)"""
+        for arg, val in sorted(kwargs.items(),
+                               # we sort on the number of dots so that
+                               # attributes are set before we set attributes on
+                               # attributes
+                               key=lambda entry: entry[0].count('.')):
+            args = arg.split('.')
+            final = args.pop()
+            obj = self
+            for entry in args:
+                obj = getattr(obj, entry)
+            setattr(obj, final, val)
+
+
+    def __getattr__(self, name):
+        if name == '_mock_methods':
+            raise AttributeError(name)
+        elif self._mock_methods is not None:
+            if name not in self._mock_methods or name in _all_magics:
+                raise AttributeError("Mock object has no attribute %r" % name)
+        elif _is_magic(name):
+            raise AttributeError(name)
+
+        result = self._mock_children.get(name)
+        if result is _deleted:
+            raise AttributeError(name)
+        elif result is None:
+            wraps = None
+            if self._mock_wraps is not None:
+                # XXXX should we get the attribute without triggering code
+                # execution?
+                wraps = getattr(self._mock_wraps, name)
+
+            result = self._get_child_mock(
+                parent=self, name=name, wraps=wraps, _new_name=name,
+                _new_parent=self
+            )
+            self._mock_children[name]  = result
+
+        elif isinstance(result, _SpecState):
+            result = create_autospec(
+                result.spec, result.spec_set, result.instance,
+                result.parent, result.name
+            )
+            self._mock_children[name]  = result
+
+        return result
+
+
+    def __repr__(self):
+        _name_list = [self._mock_new_name]
+        _parent = self._mock_new_parent
+        last = self
+
+        dot = '.'
+        if _name_list == ['()']:
+            dot = ''
+        seen = set()
+        while _parent is not None:
+            last = _parent
+
+            _name_list.append(_parent._mock_new_name + dot)
+            dot = '.'
+            if _parent._mock_new_name == '()':
+                dot = ''
+
+            _parent = _parent._mock_new_parent
+
+            # use ids here so as not to call __hash__ on the mocks
+            if id(_parent) in seen:
+                break
+            seen.add(id(_parent))
+
+        _name_list = list(reversed(_name_list))
+        _first = last._mock_name or 'mock'
+        if len(_name_list) > 1:
+            if _name_list[1] not in ('()', '().'):
+                _first += '.'
+        _name_list[0] = _first
+        name = ''.join(_name_list)
+
+        name_string = ''
+        if name not in ('mock', 'mock.'):
+            name_string = ' name=%r' % name
+
+        spec_string = ''
+        if self._spec_class is not None:
+            spec_string = ' spec=%r'
+            if self._spec_set:
+                spec_string = ' spec_set=%r'
+            spec_string = spec_string % self._spec_class.__name__
+        return "<%s%s%s id='%s'>" % (
+            type(self).__name__,
+            name_string,
+            spec_string,
+            id(self)
+        )
+
+
+    def __dir__(self):
+        """Filter the output of `dir(mock)` to only useful members.
+        XXXX
+        """
+        extras = self._mock_methods or []
+        from_type = dir(type(self))
+        from_dict = list(self.__dict__)
+
+        if FILTER_DIR:
+            from_type = [e for e in from_type if not e.startswith('_')]
+            from_dict = [e for e in from_dict if not e.startswith('_') or
+                         _is_magic(e)]
+        return sorted(set(extras + from_type + from_dict +
+                          list(self._mock_children)))
+
+
+    def __setattr__(self, name, value):
+        if name in _allowed_names:
+            # property setters go through here
+            return object.__setattr__(self, name, value)
+        elif (self._spec_set and self._mock_methods is not None and
+            name not in self._mock_methods and
+            name not in self.__dict__):
+            raise AttributeError("Mock object has no attribute '%s'" % name)
+        elif name in _unsupported_magics:
+            msg = 'Attempting to set unsupported magic method %r.' % name
+            raise AttributeError(msg)
+        elif name in _all_magics:
+            if self._mock_methods is not None and name not in self._mock_methods:
+                raise AttributeError("Mock object has no attribute '%s'" % name)
+
+            if not _is_instance_mock(value):
+                setattr(type(self), name, _get_method(name, value))
+                original = value
+                value = lambda *args, **kw: original(self, *args, **kw)
+            else:
+                # only set _new_name and not name so that mock_calls is tracked
+                # but not method calls
+                _check_and_set_parent(self, value, None, name)
+                setattr(type(self), name, value)
+                self._mock_children[name] = value
+        elif name == '__class__':
+            self._spec_class = value
+            return
+        else:
+            if _check_and_set_parent(self, value, name, name):
+                self._mock_children[name] = value
+        return object.__setattr__(self, name, value)
+
+
+    def __delattr__(self, name):
+        if name in _all_magics and name in type(self).__dict__:
+            delattr(type(self), name)
+            if name not in self.__dict__:
+                # for magic methods that are still MagicProxy objects and
+                # not set on the instance itself
+                return
+
+        if name in self.__dict__:
+            object.__delattr__(self, name)
+
+        obj = self._mock_children.get(name, _missing)
+        if obj is _deleted:
+            raise AttributeError(name)
+        if obj is not _missing:
+            del self._mock_children[name]
+        self._mock_children[name] = _deleted
+
+
+
+    def _format_mock_call_signature(self, args, kwargs):
+        name = self._mock_name or 'mock'
+        return _format_call_signature(name, args, kwargs)
+
+
+    def _format_mock_failure_message(self, args, kwargs):
+        message = 'Expected call: %s\nActual call: %s'
+        expected_string = self._format_mock_call_signature(args, kwargs)
+        call_args = self.call_args
+        if len(call_args) == 3:
+            call_args = call_args[1:]
+        actual_string = self._format_mock_call_signature(*call_args)
+        return message % (expected_string, actual_string)
+
+
+    def assert_called_with(_mock_self, *args, **kwargs):
+        """assert that the mock was called with the specified arguments.
+
+        Raises an AssertionError if the args and keyword args passed in are
+        different to the last call to the mock."""
+        self = _mock_self
+        if self.call_args is None:
+            expected = self._format_mock_call_signature(args, kwargs)
+            raise AssertionError('Expected call: %s\nNot called' % (expected,))
+
+        if self.call_args != (args, kwargs):
+            msg = self._format_mock_failure_message(args, kwargs)
+            raise AssertionError(msg)
+
+
+    def assert_called_once_with(_mock_self, *args, **kwargs):
+        """assert that the mock was called exactly once and with the specified
+        arguments."""
+        self = _mock_self
+        if not self.call_count == 1:
+            msg = ("Expected to be called once. Called %s times." %
+                   self.call_count)
+            raise AssertionError(msg)
+        return self.assert_called_with(*args, **kwargs)
+
+
+    def assert_has_calls(self, calls, any_order=False):
+        """assert the mock has been called with the specified calls.
+        The `mock_calls` list is checked for the calls.
+
+        If `any_order` is False (the default) then the calls must be
+        sequential. There can be extra calls before or after the
+        specified calls.
+
+        If `any_order` is True then the calls can be in any order, but
+        they must all appear in `mock_calls`."""
+        if not any_order:
+            if calls not in self.mock_calls:
+                raise AssertionError(
+                    'Calls not found.\nExpected: %r\n'
+                    'Actual: %r' % (calls, self.mock_calls)
+                )
+            return
+
+        all_calls = list(self.mock_calls)
+
+        not_found = []
+        for kall in calls:
+            try:
+                all_calls.remove(kall)
+            except ValueError:
+                not_found.append(kall)
+        if not_found:
+            raise AssertionError(
+                '%r not all found in call list' % (tuple(not_found),)
+            )
+
+
+    def assert_any_call(self, *args, **kwargs):
+        """assert the mock has been called with the specified arguments.
+
+        The assert passes if the mock has *ever* been called, unlike
+        `assert_called_with` and `assert_called_once_with` that only pass if
+        the call is the most recent one."""
+        kall = call(*args, **kwargs)
+        if kall not in self.call_args_list:
+            expected_string = self._format_mock_call_signature(args, kwargs)
+            raise AssertionError(
+                '%s call not found' % expected_string
+            )
+
+
+    def _get_child_mock(self, **kw):
+        """Create the child mocks for attributes and return value.
+        By default child mocks will be the same type as the parent.
+        Subclasses of Mock may want to override this to customize the way
+        child mocks are made.
+
+        For non-callable mocks the callable variant will be used (rather than
+        any custom subclass)."""
+        _type = type(self)
+        if not issubclass(_type, CallableMixin):
+            if issubclass(_type, NonCallableMagicMock):
+                klass = MagicMock
+            elif issubclass(_type, NonCallableMock) :
+                klass = Mock
+        else:
+            klass = _type.__mro__[1]
+        return klass(**kw)
+
+
+
+def _try_iter(obj):
+    if obj is None:
+        return obj
+    if _is_exception(obj):
+        return obj
+    if _callable(obj):
+        return obj
+    try:
+        return iter(obj)
+    except TypeError:
+        # XXXX backwards compatibility
+        # but this will blow up on first call - so maybe we should fail early?
+        return obj
+
+
+
+class CallableMixin(Base):
+
+    def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
+                 wraps=None, name=None, spec_set=None, parent=None,
+                 _spec_state=None, _new_name='', _new_parent=None, **kwargs):
+        self.__dict__['_mock_return_value'] = return_value
+
+        _super(CallableMixin, self).__init__(
+            spec, wraps, name, spec_set, parent,
+            _spec_state, _new_name, _new_parent, **kwargs
+        )
+
+        self.side_effect = side_effect
+
+
+    def _mock_check_sig(self, *args, **kwargs):
+        # stub method that can be replaced with one with a specific signature
+        pass
+
+
+    def __call__(_mock_self, *args, **kwargs):
+        # can't use self in case a function / method we are mocking uses self
+        # in the signature
+        _mock_self._mock_check_sig(*args, **kwargs)
+        return _mock_self._mock_call(*args, **kwargs)
+
+
+    def _mock_call(_mock_self, *args, **kwargs):
+        self = _mock_self
+        self.called = True
+        self.call_count += 1
+        self.call_args = _Call((args, kwargs), two=True)
+        self.call_args_list.append(_Call((args, kwargs), two=True))
+
+        _new_name = self._mock_new_name
+        _new_parent = self._mock_new_parent
+        self.mock_calls.append(_Call(('', args, kwargs)))
+
+        seen = set()
+        skip_next_dot = _new_name == '()'
+        do_method_calls = self._mock_parent is not None
+        name = self._mock_name
+        while _new_parent is not None:
+            this_mock_call = _Call((_new_name, args, kwargs))
+            if _new_parent._mock_new_name:
+                dot = '.'
+                if skip_next_dot:
+                    dot = ''
+
+                skip_next_dot = False
+                if _new_parent._mock_new_name == '()':
+                    skip_next_dot = True
+
+                _new_name = _new_parent._mock_new_name + dot + _new_name
+
+            if do_method_calls:
+                if _new_name == name:
+                    this_method_call = this_mock_call
+                else:
+                    this_method_call = _Call((name, args, kwargs))
+                _new_parent.method_calls.append(this_method_call)
+
+                do_method_calls = _new_parent._mock_parent is not None
+                if do_method_calls:
+                    name = _new_parent._mock_name + '.' + name
+
+            _new_parent.mock_calls.append(this_mock_call)
+            _new_parent = _new_parent._mock_new_parent
+
+            # use ids here so as not to call __hash__ on the mocks
+            _new_parent_id = id(_new_parent)
+            if _new_parent_id in seen:
+                break
+            seen.add(_new_parent_id)
+
+        ret_val = DEFAULT
+        effect = self.side_effect
+        if effect is not None:
+            if _is_exception(effect):
+                raise effect
+
+            if not _callable(effect):
+                result = next(effect)
+                if _is_exception(result):
+                    raise result
+                return result
+
+            ret_val = effect(*args, **kwargs)
+            if ret_val is DEFAULT:
+                ret_val = self.return_value
+
+        if (self._mock_wraps is not None and
+             self._mock_return_value is DEFAULT):
+            return self._mock_wraps(*args, **kwargs)
+        if ret_val is DEFAULT:
+            ret_val = self.return_value
+        return ret_val
+
+
+
+class Mock(CallableMixin, NonCallableMock):
+    """
+    Create a new `Mock` object. `Mock` takes several optional arguments
+    that specify the behaviour of the Mock object:
+
+    * `spec`: This can be either a list of strings or an existing object (a
+      class or instance) that acts as the specification for the mock object. If
+      you pass in an object then a list of strings is formed by calling dir on
+      the object (excluding unsupported magic attributes and methods). Accessing
+      any attribute not in this list will raise an `AttributeError`.
+
+      If `spec` is an object (rather than a list of strings) then
+      `mock.__class__` returns the class of the spec object. This allows mocks
+      to pass `isinstance` tests.
+
+    * `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
+      or get an attribute on the mock that isn't on the object passed as
+      `spec_set` will raise an `AttributeError`.
+
+    * `side_effect`: A function to be called whenever the Mock is called. See
+      the `side_effect` attribute. Useful for raising exceptions or
+      dynamically changing return values. The function is called with the same
+      arguments as the mock, and unless it returns `DEFAULT`, the return
+      value of this function is used as the return value.
+
+      Alternatively `side_effect` can be an exception class or instance. In
+      this case the exception will be raised when the mock is called.
+
+      If `side_effect` is an iterable then each call to the mock will return
+      the next value from the iterable. If any of the members of the iterable
+      are exceptions they will be raised instead of returned.
+
+    * `return_value`: The value returned when the mock is called. By default
+      this is a new Mock (created on first access). See the
+      `return_value` attribute.
+
+    * `wraps`: Item for the mock object to wrap. If `wraps` is not None then
+      calling the Mock will pass the call through to the wrapped object
+      (returning the real result). Attribute access on the mock will return a
+      Mock object that wraps the corresponding attribute of the wrapped object
+      (so attempting to access an attribute that doesn't exist will raise an
+      `AttributeError`).
+
+      If the mock has an explicit `return_value` set then calls are not passed
+      to the wrapped object and the `return_value` is returned instead.
+
+    * `name`: If the mock has a name then it will be used in the repr of the
+      mock. This can be useful for debugging. The name is propagated to child
+      mocks.
+
+    Mocks can also be called with arbitrary keyword arguments. These will be
+    used to set attributes on the mock after it is created.
+    """
+
+
+
+def _dot_lookup(thing, comp, import_path):
+    try:
+        return getattr(thing, comp)
+    except AttributeError:
+        __import__(import_path)
+        return getattr(thing, comp)
+
+
+def _importer(target):
+    components = target.split('.')
+    import_path = components.pop(0)
+    thing = __import__(import_path)
+
+    for comp in components:
+        import_path += ".%s" % comp
+        thing = _dot_lookup(thing, comp, import_path)
+    return thing
+
+
+def _is_started(patcher):
+    # XXXX horrible
+    return hasattr(patcher, 'is_local')
+
+
+class _patch(object):
+
+    attribute_name = None
+    _active_patches = set()
+
+    def __init__(
+            self, getter, attribute, new, spec, create,
+            spec_set, autospec, new_callable, kwargs
+        ):
+        if new_callable is not None:
+            if new is not DEFAULT:
+                raise ValueError(
+                    "Cannot use 'new' and 'new_callable' together"
+                )
+            if autospec is not None:
+                raise ValueError(
+                    "Cannot use 'autospec' and 'new_callable' together"
+                )
+
+        self.getter = getter
+        self.attribute = attribute
+        self.new = new
+        self.new_callable = new_callable
+        self.spec = spec
+        self.create = create
+        self.has_local = False
+        self.spec_set = spec_set
+        self.autospec = autospec
+        self.kwargs = kwargs
+        self.additional_patchers = []
+
+
+    def copy(self):
+        patcher = _patch(
+            self.getter, self.attribute, self.new, self.spec,
+            self.create, self.spec_set,
+            self.autospec, self.new_callable, self.kwargs
+        )
+        patcher.attribute_name = self.attribute_name
+        patcher.additional_patchers = [
+            p.copy() for p in self.additional_patchers
+        ]
+        return patcher
+
+
+    def __call__(self, func):
+        if isinstance(func, ClassTypes):
+            return self.decorate_class(func)
+        return self.decorate_callable(func)
+
+
+    def decorate_class(self, klass):
+        for attr in dir(klass):
+            if not attr.startswith(patch.TEST_PREFIX):
+                continue
+
+            attr_value = getattr(klass, attr)
+            if not hasattr(attr_value, "__call__"):
+                continue
+
+            patcher = self.copy()
+            setattr(klass, attr, patcher(attr_value))
+        return klass
+
+
+    def decorate_callable(self, func):
+        if hasattr(func, 'patchings'):
+            func.patchings.append(self)
+            return func
+
+        @wraps(func)
+        def patched(*args, **keywargs):
+            # don't use a with here (backwards compatibility with Python 2.4)
+            extra_args = []
+            entered_patchers = []
+
+            # can't use try...except...finally because of Python 2.4
+            # compatibility
+            exc_info = tuple()
+            try:
+                try:
+                    for patching in patched.patchings:
+                        arg = patching.__enter__()
+                        entered_patchers.append(patching)
+                        if patching.attribute_name is not None:
+                            keywargs.update(arg)
+                        elif patching.new is DEFAULT:
+                            extra_args.append(arg)
+
+                    args += tuple(extra_args)
+                    return func(*args, **keywargs)
+                except:
+                    if (patching not in entered_patchers and
+                        _is_started(patching)):
+                        # the patcher may have been started, but an exception
+                        # raised whilst entering one of its additional_patchers
+                        entered_patchers.append(patching)
+                    # Pass the exception to __exit__
+                    exc_info = sys.exc_info()
+                    # re-raise the exception
+                    raise
+            finally:
+                for patching in reversed(entered_patchers):
+                    patching.__exit__(*exc_info)
+
+        patched.patchings = [self]
+        if hasattr(func, 'func_code'):
+            # not in Python 3
+            patched.compat_co_firstlineno = getattr(
+                func, "compat_co_firstlineno",
+                func.func_code.co_firstlineno
+            )
+        return patched
+
+
+    def get_original(self):
+        target = self.getter()
+        name = self.attribute
+
+        original = DEFAULT
+        local = False
+
+        try:
+            original = target.__dict__[name]
+        except (AttributeError, KeyError):
+            original = getattr(target, name, DEFAULT)
+        else:
+            local = True
+
+        if not self.create and original is DEFAULT:
+            raise AttributeError(
+                "%s does not have the attribute %r" % (target, name)
+            )
+        return original, local
+
+
+    def __enter__(self):
+        """Perform the patch."""
+        new, spec, spec_set = self.new, self.spec, self.spec_set
+        autospec, kwargs = self.autospec, self.kwargs
+        new_callable = self.new_callable
+        self.target = self.getter()
+
+        # normalise False to None
+        if spec is False:
+            spec = None
+        if spec_set is False:
+            spec_set = None
+        if autospec is False:
+            autospec = None
+
+        if spec is not None and autospec is not None:
+            raise TypeError("Can't specify spec and autospec")
+        if ((spec is not None or autospec is not None) and
+            spec_set not in (True, None)):
+            raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
+
+        original, local = self.get_original()
+
+        if new is DEFAULT and autospec is None:
+            inherit = False
+            if spec is True:
+                # set spec to the object we are replacing
+                spec = original
+                if spec_set is True:
+                    spec_set = original
+                    spec = None
+            elif spec is not None:
+                if spec_set is True:
+                    spec_set = spec
+                    spec = None
+            elif spec_set is True:
+                spec_set = original
+
+            if spec is not None or spec_set is not None:
+                if original is DEFAULT:
+                    raise TypeError("Can't use 'spec' with create=True")
+                if isinstance(original, ClassTypes):
+                    # If we're patching out a class and there is a spec
+                    inherit = True
+
+            Klass = MagicMock
+            _kwargs = {}
+            if new_callable is not None:
+                Klass = new_callable
+            elif spec is not None or spec_set is not None:
+                this_spec = spec
+                if spec_set is not None:
+                    this_spec = spec_set
+                if _is_list(this_spec):
+                    not_callable = '__call__' not in this_spec
+                else:
+                    not_callable = not _callable(this_spec)
+                if not_callable:
+                    Klass = NonCallableMagicMock
+
+            if spec is not None:
+                _kwargs['spec'] = spec
+            if spec_set is not None:
+                _kwargs['spec_set'] = spec_set
+
+            # add a name to mocks
+            if (isinstance(Klass, type) and
+                issubclass(Klass, NonCallableMock) and self.attribute):
+                _kwargs['name'] = self.attribute
+
+            _kwargs.update(kwargs)
+            new = Klass(**_kwargs)
+
+            if inherit and _is_instance_mock(new):
+                # we can only tell if the instance should be callable if the
+                # spec is not a list
+                this_spec = spec
+                if spec_set is not None:
+                    this_spec = spec_set
+                if (not _is_list(this_spec) and not
+                    _instance_callable(this_spec)):
+                    Klass = NonCallableMagicMock
+
+                _kwargs.pop('name')
+                new.return_value = Klass(_new_parent=new, _new_name='()',
+                                         **_kwargs)
+        elif autospec is not None:
+            # spec is ignored, new *must* be default, spec_set is treated
+            # as a boolean. Should we check spec is not None and that spec_set
+            # is a bool?
+            if new is not DEFAULT:
+                raise TypeError(
+                    "autospec creates the mock for you. Can't specify "
+                    "autospec and new."
+                )
+            if original is DEFAULT:
+                raise TypeError("Can't use 'autospec' with create=True")
+            spec_set = bool(spec_set)
+            if autospec is True:
+                autospec = original
+
+            new = create_autospec(autospec, spec_set=spec_set,
+                                  _name=self.attribute, **kwargs)
+        elif kwargs:
+            # can't set keyword args when we aren't creating the mock
+            # XXXX If new is a Mock we could call new.configure_mock(**kwargs)
+            raise TypeError("Can't pass kwargs to a mock we aren't creating")
+
+        new_attr = new
+
+        self.temp_original = original
+        self.is_local = local
+        setattr(self.target, self.attribute, new_attr)
+        if self.attribute_name is not None:
+            extra_args = {}
+            if self.new is DEFAULT:
+                extra_args[self.attribute_name] =  new
+            for patching in self.additional_patchers:
+                arg = patching.__enter__()
+                if patching.new is DEFAULT:
+                    extra_args.update(arg)
+            return extra_args
+
+        return new
+
+
+    def __exit__(self, *exc_info):
+        """Undo the patch."""
+        if not _is_started(self):
+            raise RuntimeError('stop called on unstarted patcher')
+
+        if self.is_local and self.temp_original is not DEFAULT:
+            setattr(self.target, self.attribute, self.temp_original)
+        else:
+            delattr(self.target, self.attribute)
+            if not self.create and not hasattr(self.target, self.attribute):
+                # needed for proxy objects like django settings
+                setattr(self.target, self.attribute, self.temp_original)
+
+        del self.temp_original
+        del self.is_local
+        del self.target
+        for patcher in reversed(self.additional_patchers):
+            if _is_started(patcher):
+                patcher.__exit__(*exc_info)
+
+
+    def start(self):
+        """Activate a patch, returning any created mock."""
+        result = self.__enter__()
+        self._active_patches.add(self)
+        return result
+
+
+    def stop(self):
+        """Stop an active patch."""
+        self._active_patches.discard(self)
+        return self.__exit__()
+
+
+
+def _get_target(target):
+    try:
+        target, attribute = target.rsplit('.', 1)
+    except (TypeError, ValueError):
+        raise TypeError("Need a valid target to patch. You supplied: %r" %
+                        (target,))
+    getter = lambda: _importer(target)
+    return getter, attribute
+
+
+def _patch_object(
+        target, attribute, new=DEFAULT, spec=None,
+        create=False, spec_set=None, autospec=None,
+        new_callable=None, **kwargs
+    ):
+    """
+    patch.object(target, attribute, new=DEFAULT, spec=None, create=False,
+                 spec_set=None, autospec=None, new_callable=None, **kwargs)
+
+    patch the named member (`attribute`) on an object (`target`) with a mock
+    object.
+
+    `patch.object` can be used as a decorator, class decorator or a context
+    manager. Arguments `new`, `spec`, `create`, `spec_set`,
+    `autospec` and `new_callable` have the same meaning as for `patch`. Like
+    `patch`, `patch.object` takes arbitrary keyword arguments for configuring
+    the mock object it creates.
+
+    When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
+    for choosing which methods to wrap.
+    """
+    getter = lambda: target
+    return _patch(
+        getter, attribute, new, spec, create,
+        spec_set, autospec, new_callable, kwargs
+    )
+
+
+def _patch_multiple(target, spec=None, create=False, spec_set=None,
+                    autospec=None, new_callable=None, **kwargs):
+    """Perform multiple patches in a single call. It takes the object to be
+    patched (either as an object or a string to fetch the object by importing)
+    and keyword arguments for the patches::
+
+        with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
+            ...
+
+    Use `DEFAULT` as the value if you want `patch.multiple` to create
+    mocks for you. In this case the created mocks are passed into a decorated
+    function by keyword, and a dictionary is returned when `patch.multiple` is
+    used as a context manager.
+
+    `patch.multiple` can be used as a decorator, class decorator or a context
+    manager. The arguments `spec`, `spec_set`, `create`,
+    `autospec` and `new_callable` have the same meaning as for `patch`. These
+    arguments will be applied to *all* patches done by `patch.multiple`.
+
+    When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
+    for choosing which methods to wrap.
+    """
+    if type(target) in (unicode, str):
+        getter = lambda: _importer(target)
+    else:
+        getter = lambda: target
+
+    if not kwargs:
+        raise ValueError(
+            'Must supply at least one keyword argument with patch.multiple'
+        )
+    # need to wrap in a list for python 3, where items is a view
+    items = list(kwargs.items())
+    attribute, new = items[0]
+    patcher = _patch(
+        getter, attribute, new, spec, create, spec_set,
+        autospec, new_callable, {}
+    )
+    patcher.attribute_name = attribute
+    for attribute, new in items[1:]:
+        this_patcher = _patch(
+            getter, attribute, new, spec, create, spec_set,
+            autospec, new_callable, {}
+        )
+        this_patcher.attribute_name = attribute
+        patcher.additional_patchers.append(this_patcher)
+    return patcher
+
+
+def patch(
+        target, new=DEFAULT, spec=None, create=False,
+        spec_set=None, autospec=None, new_callable=None, **kwargs
+    ):
+    """
+    `patch` acts as a function decorator, class decorator or a context
+    manager. Inside the body of the function or with statement, the `target`
+    is patched with a `new` object. When the function/with statement exits
+    the patch is undone.
+
+    If `new` is omitted, then the target is replaced with a
+    `MagicMock`. If `patch` is used as a decorator and `new` is
+    omitted, the created mock is passed in as an extra argument to the
+    decorated function. If `patch` is used as a context manager the created
+    mock is returned by the context manager.
+
+    `target` should be a string in the form `'package.module.ClassName'`. The
+    `target` is imported and the specified object replaced with the `new`
+    object, so the `target` must be importable from the environment you are
+    calling `patch` from. The target is imported when the decorated function
+    is executed, not at decoration time.
+
+    The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
+    if patch is creating one for you.
+
+    In addition you can pass `spec=True` or `spec_set=True`, which causes
+    patch to pass in the object being mocked as the spec/spec_set object.
+
+    `new_callable` allows you to specify a different class, or callable object,
+    that will be called to create the `new` object. By default `MagicMock` is
+    used.
+
+    A more powerful form of `spec` is `autospec`. If you set `autospec=True`
+    then the mock will be created with a spec from the object being replaced.
+    All attributes of the mock will also have the spec of the corresponding
+    attribute of the object being replaced. Methods and functions being
+    mocked will have their arguments checked and will raise a `TypeError` if
+    they are called with the wrong signature. For mocks replacing a class,
+    their return value (the 'instance') will have the same spec as the class.
+
+    Instead of `autospec=True` you can pass `autospec=some_object` to use an
+    arbitrary object as the spec instead of the one being replaced.
+
+    By default `patch` will fail to replace attributes that don't exist. If
+    you pass in `create=True`, and the attribute doesn't exist, patch will
+    create the attribute for you when the patched function is called, and
+    delete it again afterwards. This is useful for writing tests against
+    attributes that your production code creates at runtime. It is off by
+    default because it can be dangerous. With it switched on you can write
+    passing tests against APIs that don't actually exist!
+
+    Patch can be used as a `TestCase` class decorator. It works by
+    decorating each test method in the class. This reduces the boilerplate
+    code when your test methods share a common set of patches. `patch` finds
+    tests by looking for method names that start with `patch.TEST_PREFIX`.
+    By default this is `test`, which matches the way `unittest` finds tests.
+    You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
+
+    Patch can be used as a context manager, with the with statement. Here the
+    patching applies to the indented block after the with statement. If you
+    use "as" then the patched object will be bound to the name after the
+    "as"; very useful if `patch` is creating a mock object for you.
+
+    `patch` takes arbitrary keyword arguments. These will be passed to
+    the `Mock` (or `new_callable`) on construction.
+
+    `patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
+    available for alternate use-cases.
+    """
+    getter, attribute = _get_target(target)
+    return _patch(
+        getter, attribute, new, spec, create,
+        spec_set, autospec, new_callable, kwargs
+    )
+
+
+class _patch_dict(object):
+    """
+    Patch a dictionary, or dictionary like object, and restore the dictionary
+    to its original state after the test.
+
+    `in_dict` can be a dictionary or a mapping like container. If it is a
+    mapping then it must at least support getting, setting and deleting items
+    plus iterating over keys.
+
+    `in_dict` can also be a string specifying the name of the dictionary, which
+    will then be fetched by importing it.
+
+    `values` can be a dictionary of values to set in the dictionary. `values`
+    can also be an iterable of `(key, value)` pairs.
+
+    If `clear` is True then the dictionary will be cleared before the new
+    values are set.
+
+    `patch.dict` can also be called with arbitrary keyword arguments to set
+    values in the dictionary::
+
+        with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
+            ...
+
+    `patch.dict` can be used as a context manager, decorator or class
+    decorator. When used as a class decorator `patch.dict` honours
+    `patch.TEST_PREFIX` for choosing which methods to wrap.
+    """
+
+    def __init__(self, in_dict, values=(), clear=False, **kwargs):
+        if isinstance(in_dict, basestring):
+            in_dict = _importer(in_dict)
+        self.in_dict = in_dict
+        # support any argument supported by dict(...) constructor
+        self.values = dict(values)
+        self.values.update(kwargs)
+        self.clear = clear
+        self._original = None
+
+
+    def __call__(self, f):
+        if isinstance(f, ClassTypes):
+            return self.decorate_class(f)
+        @wraps(f)
+        def _inner(*args, **kw):
+            self._patch_dict()
+            try:
+                return f(*args, **kw)
+            finally:
+                self._unpatch_dict()
+
+        return _inner
+
+
+    def decorate_class(self, klass):
+        for attr in dir(klass):
+            attr_value = getattr(klass, attr)
+            if (attr.startswith(patch.TEST_PREFIX) and
+                 hasattr(attr_value, "__call__")):
+                decorator = _patch_dict(self.in_dict, self.values, self.clear)
+                decorated = decorator(attr_value)
+                setattr(klass, attr, decorated)
+        return klass
+
+
+    def __enter__(self):
+        """Patch the dict."""
+        self._patch_dict()
+
+
+    def _patch_dict(self):
+        values = self.values
+        in_dict = self.in_dict
+        clear = self.clear
+
+        try:
+            original = in_dict.copy()
+        except AttributeError:
+            # dict like object with no copy method
+            # must support iteration over keys
+            original = {}
+            for key in in_dict:
+                original[key] = in_dict[key]
+        self._original = original
+
+        if clear:
+            _clear_dict(in_dict)
+
+        try:
+            in_dict.update(values)
+        except AttributeError:
+            # dict like object with no update method
+            for key in values:
+                in_dict[key] = values[key]
+
+
+    def _unpatch_dict(self):
+        in_dict = self.in_dict
+        original = self._original
+
+        _clear_dict(in_dict)
+
+        try:
+            in_dict.update(original)
+        except AttributeError:
+            for key in original:
+                in_dict[key] = original[key]
+
+
+    def __exit__(self, *args):
+        """Unpatch the dict."""
+        self._unpatch_dict()
+        return False
+
+    start = __enter__
+    stop = __exit__
+
+
+def _clear_dict(in_dict):
+    try:
+        in_dict.clear()
+    except AttributeError:
+        keys = list(in_dict)
+        for key in keys:
+            del in_dict[key]
+
+
+def _patch_stopall():
+    """Stop all active patches."""
+    for patch in list(_patch._active_patches):
+        patch.stop()
+
+
+patch.object = _patch_object
+patch.dict = _patch_dict
+patch.multiple = _patch_multiple
+patch.stopall = _patch_stopall
+patch.TEST_PREFIX = 'test'
+
+magic_methods = (
+    "lt le gt ge eq ne "
+    "getitem setitem delitem "
+    "len contains iter "
+    "hash str sizeof "
+    "enter exit "
+    "divmod neg pos abs invert "
+    "complex int float index "
+    "trunc floor ceil "
+)
+
+numerics = "add sub mul div floordiv mod lshift rshift and xor or pow "
+inplace = ' '.join('i%s' % n for n in numerics.split())
+right = ' '.join('r%s' % n for n in numerics.split())
+extra = ''
+if inPy3k:
+    extra = 'bool next '
+else:
+    extra = 'unicode long nonzero oct hex truediv rtruediv '
+
+# not including __prepare__, __instancecheck__, __subclasscheck__
+# (as they are metaclass methods)
+# __del__ is not supported at all as it causes problems if it exists
+
+_non_defaults = set('__%s__' % method for method in [
+    'cmp', 'getslice', 'setslice', 'coerce', 'subclasses',
+    'format', 'get', 'set', 'delete', 'reversed',
+    'missing', 'reduce', 'reduce_ex', 'getinitargs',
+    'getnewargs', 'getstate', 'setstate', 'getformat',
+    'setformat', 'repr', 'dir'
+])
+
+
+def _get_method(name, func):
+    "Turns a callable object (like a mock) into a real function"
+    def method(self, *args, **kw):
+        return func(self, *args, **kw)
+    method.__name__ = name
+    return method
+
+
+_magics = set(
+    '__%s__' % method for method in
+    ' '.join([magic_methods, numerics, inplace, right, extra]).split()
+)
+
+_all_magics = _magics | _non_defaults
+
+_unsupported_magics = set([
+    '__getattr__', '__setattr__',
+    '__init__', '__new__', '__prepare__',
+    '__instancecheck__', '__subclasscheck__',
+    '__del__'
+])
+
+_calculate_return_value = {
+    '__hash__': lambda self: object.__hash__(self),
+    '__str__': lambda self: object.__str__(self),
+    '__sizeof__': lambda self: object.__sizeof__(self),
+    '__unicode__': lambda self: unicode(object.__str__(self)),
+}
+
+_return_values = {
+    '__lt__': NotImplemented,
+    '__gt__': NotImplemented,
+    '__le__': NotImplemented,
+    '__ge__': NotImplemented,
+    '__int__': 1,
+    '__contains__': False,
+    '__len__': 0,
+    '__exit__': False,
+    '__complex__': 1j,
+    '__float__': 1.0,
+    '__bool__': True,
+    '__nonzero__': True,
+    '__oct__': '1',
+    '__hex__': '0x1',
+    '__long__': long(1),
+    '__index__': 1,
+}
+
+
+def _get_eq(self):
+    def __eq__(other):
+        ret_val = self.__eq__._mock_return_value
+        if ret_val is not DEFAULT:
+            return ret_val
+        return self is other
+    return __eq__
+
+def _get_ne(self):
+    def __ne__(other):
+        ret_val = self.__ne__._mock_return_value
+        if ret_val is not DEFAULT:
+            # honour a configured return value, mirroring _get_eq above
+            return ret_val
+        return self is not other
+    return __ne__
+
+def _get_iter(self):
+    def __iter__():
+        ret_val = self.__iter__._mock_return_value
+        if ret_val is DEFAULT:
+            return iter([])
+        # if ret_val was already an iterator, then calling iter on it should
+        # return the iterator unchanged
+        return iter(ret_val)
+    return __iter__
+
+_side_effect_methods = {
+    '__eq__': _get_eq,
+    '__ne__': _get_ne,
+    '__iter__': _get_iter,
+}
+
+
+
+def _set_return_value(mock, method, name):
+    fixed = _return_values.get(name, DEFAULT)
+    if fixed is not DEFAULT:
+        method.return_value = fixed
+        return
+
+    return_calculator = _calculate_return_value.get(name)
+    if return_calculator is not None:
+        try:
+            return_value = return_calculator(mock)
+        except AttributeError:
+            # XXXX why do we return AttributeError here?
+            #      set it as a side_effect instead?
+            return_value = AttributeError(name)
+        method.return_value = return_value
+        return
+
+    side_effector = _side_effect_methods.get(name)
+    if side_effector is not None:
+        method.side_effect = side_effector(mock)
+
+
+
+class MagicMixin(object):
+    def __init__(self, *args, **kw):
+        _super(MagicMixin, self).__init__(*args, **kw)
+        self._mock_set_magics()
+
+
+    def _mock_set_magics(self):
+        these_magics = _magics
+
+        if self._mock_methods is not None:
+            these_magics = _magics.intersection(self._mock_methods)
+
+            remove_magics = set()
+            remove_magics = _magics - these_magics
+
+            for entry in remove_magics:
+                if entry in type(self).__dict__:
+                    # remove unneeded magic methods
+                    delattr(self, entry)
+
+        # don't overwrite existing attributes if called a second time
+        these_magics = these_magics - set(type(self).__dict__)
+
+        _type = type(self)
+        for entry in these_magics:
+            setattr(_type, entry, MagicProxy(entry, self))
+
+
+
+class NonCallableMagicMock(MagicMixin, NonCallableMock):
+    """A version of `MagicMock` that isn't callable."""
+    def mock_add_spec(self, spec, spec_set=False):
+        """Add a spec to a mock. `spec` can either be an object or a
+        list of strings. Only attributes on the `spec` can be fetched as
+        attributes from the mock.
+
+        If `spec_set` is True then only attributes on the spec can be set."""
+        self._mock_add_spec(spec, spec_set)
+        self._mock_set_magics()
+
+
+
+class MagicMock(MagicMixin, Mock):
+    """
+    MagicMock is a subclass of Mock with default implementations
+    of most of the magic methods. You can use MagicMock without having to
+    configure the magic methods yourself.
+
+    If you use the `spec` or `spec_set` arguments then *only* magic
+    methods that exist in the spec will be created.
+
+    Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
+    """
+    def mock_add_spec(self, spec, spec_set=False):
+        """Add a spec to a mock. `spec` can either be an object or a
+        list of strings. Only attributes on the `spec` can be fetched as
+        attributes from the mock.
+
+        If `spec_set` is True then only attributes on the spec can be set."""
+        self._mock_add_spec(spec, spec_set)
+        self._mock_set_magics()
+
+
+
+class MagicProxy(object):
+    def __init__(self, name, parent):
+        self.name = name
+        self.parent = parent
+
+    def __call__(self, *args, **kwargs):
+        m = self.create_mock()
+        return m(*args, **kwargs)
+
+    def create_mock(self):
+        entry = self.name
+        parent = self.parent
+        m = parent._get_child_mock(name=entry, _new_name=entry,
+                                   _new_parent=parent)
+        setattr(parent, entry, m)
+        _set_return_value(parent, m, entry)
+        return m
+
+    def __get__(self, obj, _type=None):
+        return self.create_mock()
+
+
+
+class _ANY(object):
+    "A helper object that compares equal to everything."
+
+    def __eq__(self, other):
+        return True
+
+    def __ne__(self, other):
+        return False
+
+    def __repr__(self):
+        return '<ANY>'
+
+ANY = _ANY()
+
+
+
+def _format_call_signature(name, args, kwargs):
+    message = '%s(%%s)' % name
+    formatted_args = ''
+    args_string = ', '.join([repr(arg) for arg in args])
+    kwargs_string = ', '.join([
+        '%s=%r' % (key, value) for key, value in kwargs.items()
+    ])
+    if args_string:
+        formatted_args = args_string
+    if kwargs_string:
+        if formatted_args:
+            formatted_args += ', '
+        formatted_args += kwargs_string
+
+    return message % formatted_args
+
+
+
+class _Call(tuple):
+    """
+    A tuple for holding the results of a call to a mock, either in the form
+    `(args, kwargs)` or `(name, args, kwargs)`.
+
+    If args or kwargs are empty then a call tuple will compare equal to
+    a tuple without those values. This makes comparisons less verbose::
+
+        _Call(('name', (), {})) == ('name',)
+        _Call(('name', (1,), {})) == ('name', (1,))
+        _Call(((), {'a': 'b'})) == ({'a': 'b'},)
+
+    The `_Call` object provides a useful shortcut for comparing with call::
+
+        _Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
+        _Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
+
+    If the _Call has no name then it will match any name.
+    """
+    def __new__(cls, value=(), name=None, parent=None, two=False,
+                from_kall=True):
+        name = ''
+        args = ()
+        kwargs = {}
+        _len = len(value)
+        if _len == 3:
+            name, args, kwargs = value
+        elif _len == 2:
+            first, second = value
+            if isinstance(first, basestring):
+                name = first
+                if isinstance(second, tuple):
+                    args = second
+                else:
+                    kwargs = second
+            else:
+                args, kwargs = first, second
+        elif _len == 1:
+            value, = value
+            if isinstance(value, basestring):
+                name = value
+            elif isinstance(value, tuple):
+                args = value
+            else:
+                kwargs = value
+
+        if two:
+            return tuple.__new__(cls, (args, kwargs))
+
+        return tuple.__new__(cls, (name, args, kwargs))
+
+
+    def __init__(self, value=(), name=None, parent=None, two=False,
+                 from_kall=True):
+        self.name = name
+        self.parent = parent
+        self.from_kall = from_kall
+
+
+    def __eq__(self, other):
+        if other is ANY:
+            return True
+        try:
+            len_other = len(other)
+        except TypeError:
+            return False
+
+        self_name = ''
+        if len(self) == 2:
+            self_args, self_kwargs = self
+        else:
+            self_name, self_args, self_kwargs = self
+
+        other_name = ''
+        if len_other == 0:
+            other_args, other_kwargs = (), {}
+        elif len_other == 3:
+            other_name, other_args, other_kwargs = other
+        elif len_other == 1:
+            value, = other
+            if isinstance(value, tuple):
+                other_args = value
+                other_kwargs = {}
+            elif isinstance(value, basestring):
+                other_name = value
+                other_args, other_kwargs = (), {}
+            else:
+                other_args = ()
+                other_kwargs = value
+        else:
+            # len 2
+            # could be (name, args) or (name, kwargs) or (args, kwargs)
+            first, second = other
+            if isinstance(first, basestring):
+                other_name = first
+                if isinstance(second, tuple):
+                    other_args, other_kwargs = second, {}
+                else:
+                    other_args, other_kwargs = (), second
+            else:
+                other_args, other_kwargs = first, second
+
+        if self_name and other_name != self_name:
+            return False
+
+        # this order is important for ANY to work!
+        return (other_args, other_kwargs) == (self_args, self_kwargs)
+
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+
+    def __call__(self, *args, **kwargs):
+        if self.name is None:
+            return _Call(('', args, kwargs), name='()')
+
+        name = self.name + '()'
+        return _Call((self.name, args, kwargs), name=name, parent=self)
+
+
+    def __getattr__(self, attr):
+        if self.name is None:
+            return _Call(name=attr, from_kall=False)
+        name = '%s.%s' % (self.name, attr)
+        return _Call(name=name, parent=self, from_kall=False)
+
+
+    def __repr__(self):
+        if not self.from_kall:
+            name = self.name or 'call'
+            if name.startswith('()'):
+                name = 'call%s' % name
+            return name
+
+        if len(self) == 2:
+            name = 'call'
+            args, kwargs = self
+        else:
+            name, args, kwargs = self
+            if not name:
+                name = 'call'
+            elif not name.startswith('()'):
+                name = 'call.%s' % name
+            else:
+                name = 'call%s' % name
+        return _format_call_signature(name, args, kwargs)
+
+
+    def call_list(self):
+        """For a call object that represents multiple calls, `call_list`
+        returns a list of all the intermediate calls as well as the
+        final call."""
+        vals = []
+        thing = self
+        while thing is not None:
+            if thing.from_kall:
+                vals.append(thing)
+            thing = thing.parent
+        return _CallList(reversed(vals))
+
+
+call = _Call(from_kall=False)
+
+
+
+def create_autospec(spec, spec_set=False, instance=False, _parent=None,
+                    _name=None, **kwargs):
+    """Create a mock object using another object as a spec. Attributes on the
+    mock will use the corresponding attribute on the `spec` object as their
+    spec.
+
+    Functions or methods being mocked will have their arguments checked
+    to ensure that they are called with the correct signature.
+
+    If `spec_set` is True then attempting to set attributes that don't exist
+    on the spec object will raise an `AttributeError`.
+
+    If a class is used as a spec then the return value of the mock (the
+    instance of the class) will have the same spec. You can use a class as the
+    spec for an instance object by passing `instance=True`. The returned mock
+    will only be callable if instances of the mock are callable.
+
+    `create_autospec` also takes arbitrary keyword arguments that are passed to
+    the constructor of the created mock."""
+    if _is_list(spec):
+        # can't pass a list instance to the mock constructor as it will be
+        # interpreted as a list of strings
+        spec = type(spec)
+
+    is_type = isinstance(spec, ClassTypes)
+
+    _kwargs = {'spec': spec}
+    if spec_set:
+        _kwargs = {'spec_set': spec}
+    elif spec is None:
+        # None we mock with a normal mock without a spec
+        _kwargs = {}
+
+    _kwargs.update(kwargs)
+
+    Klass = MagicMock
+    if type(spec) in DescriptorTypes:
+        # descriptors don't have a spec
+        # because we don't know what type they return
+        _kwargs = {}
+    elif not _callable(spec):
+        Klass = NonCallableMagicMock
+    elif is_type and instance and not _instance_callable(spec):
+        Klass = NonCallableMagicMock
+
+    _new_name = _name
+    if _parent is None:
+        # for a top level object no _new_name should be set
+        _new_name = ''
+
+    mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
+                 name=_name, **_kwargs)
+
+    if isinstance(spec, FunctionTypes):
+        # should only happen at the top level because we don't
+        # recurse for functions
+        mock = _set_signature(mock, spec)
+    else:
+        _check_signature(spec, mock, is_type, instance)
+
+    if _parent is not None and not instance:
+        _parent._mock_children[_name] = mock
+
+    if is_type and not instance and 'return_value' not in kwargs:
+        mock.return_value = create_autospec(spec, spec_set, instance=True,
+                                            _name='()', _parent=mock)
+
+    for entry in dir(spec):
+        if _is_magic(entry):
+            # MagicMock already does the useful magic methods for us
+            continue
+
+        if isinstance(spec, FunctionTypes) and entry in FunctionAttributes:
+            # allow a mock to actually be a function
+            continue
+
+        # XXXX do we need a better way of getting attributes without
+        # triggering code execution (?) Probably not - we need the actual
+        # object to mock it so we would rather trigger a property than mock
+        # the property descriptor. Likewise we want to mock out dynamically
+        # provided attributes.
+        # XXXX what about attributes that raise exceptions other than
+        # AttributeError on being fetched?
+        # we could be resilient against it, or catch and propagate the
+        # exception when the attribute is fetched from the mock
+        try:
+            original = getattr(spec, entry)
+        except AttributeError:
+            continue
+
+        kwargs = {'spec': original}
+        if spec_set:
+            kwargs = {'spec_set': original}
+
+        if not isinstance(original, FunctionTypes):
+            new = _SpecState(original, spec_set, mock, entry, instance)
+            mock._mock_children[entry] = new
+        else:
+            parent = mock
+            if isinstance(spec, FunctionTypes):
+                parent = mock.mock
+
+            new = MagicMock(parent=parent, name=entry, _new_name=entry,
+                            _new_parent=parent, **kwargs)
+            mock._mock_children[entry] = new
+            skipfirst = _must_skip(spec, entry, is_type)
+            _check_signature(original, new, skipfirst=skipfirst)
+
+        # so functions created with _set_signature become instance attributes,
+        # *plus* their underlying mock exists in _mock_children of the parent
+        # mock. Adding to _mock_children may be unnecessary where we are also
+        # setting as an instance attribute?
+        if isinstance(new, FunctionTypes):
+            setattr(mock, entry, new)
+
+    return mock
+
+
+def _must_skip(spec, entry, is_type):
+    if not isinstance(spec, ClassTypes):
+        if entry in getattr(spec, '__dict__', {}):
+            # instance attribute - shouldn't skip
+            return False
+        spec = spec.__class__
+    if not hasattr(spec, '__mro__'):
+        # old style class: can't have descriptors anyway
+        return is_type
+
+    for klass in spec.__mro__:
+        result = klass.__dict__.get(entry, DEFAULT)
+        if result is DEFAULT:
+            continue
+        if isinstance(result, (staticmethod, classmethod)):
+            return False
+        return is_type
+
+    # shouldn't get here unless function is a dynamically provided attribute
+    # XXXX untested behaviour
+    return is_type
+
+
+def _get_class(obj):
+    try:
+        return obj.__class__
+    except AttributeError:
+        # in Python 2, _sre.SRE_Pattern objects have no __class__
+        return type(obj)
+
+
+class _SpecState(object):
+
+    def __init__(self, spec, spec_set=False, parent=None,
+                 name=None, ids=None, instance=False):
+        self.spec = spec
+        self.ids = ids
+        self.spec_set = spec_set
+        self.parent = parent
+        self.instance = instance
+        self.name = name
+
+
+FunctionTypes = (
+    # python function
+    type(create_autospec),
+    # instance method
+    type(ANY.__eq__),
+    # unbound method
+    type(_ANY.__eq__),
+)
+
+FunctionAttributes = set([
+    'func_closure',
+    'func_code',
+    'func_defaults',
+    'func_dict',
+    'func_doc',
+    'func_globals',
+    'func_name',
+])
+
+
+file_spec = None
+
+
+def mock_open(mock=None, read_data=''):
+    """
+    A helper function to create a mock to replace the use of `open`. It works
+    for `open` called directly or used as a context manager.
+
+    The `mock` argument is the mock object to configure. If `None` (the
+    default) then a `MagicMock` will be created for you, with the API limited
+    to methods or attributes available on standard file handles.
+
+    `read_data` is a string for the `read` method of the file handle to return.
+    This is an empty string by default.
+    """
+    global file_spec
+    if file_spec is None:
+        # set on first use
+        if inPy3k:
+            import _io
+            file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+        else:
+            file_spec = file
+
+    if mock is None:
+        mock = MagicMock(name='open', spec=open)
+
+    handle = MagicMock(spec=file_spec)
+    handle.write.return_value = None
+    handle.__enter__.return_value = handle
+    handle.read.return_value = read_data
+
+    mock.return_value = handle
+    return mock
+
+
+class PropertyMock(Mock):
+    """
+    A mock intended to be used as a property, or other descriptor, on a class.
+    `PropertyMock` provides `__get__` and `__set__` methods so you can specify
+    a return value when it is fetched.
+
+    Fetching a `PropertyMock` instance from an object calls the mock, with
+    no args. Setting it calls the mock with the value being set.
+    """
+    def _get_child_mock(self, **kwargs):
+        return MagicMock(**kwargs)
+
+    def __get__(self, obj, obj_type):
+        return self()
+    def __set__(self, obj, val):
+        self(val)
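A minimal usage sketch of the patch()/patch.dict()/mock_open() helpers vendored
above (Python 2). The import path, the patched targets, and the literal values
below are illustrative assumptions only; nothing in this change depends on them.

    import os
    from mock import mock_open, patch  # adjust to the vendored import path

    # patch() as a context manager swaps the target for a MagicMock and
    # restores the original attribute on exit.
    with patch('os.getcwd') as fake_getcwd:
        fake_getcwd.return_value = '/tmp'
        assert os.getcwd() == '/tmp'
        fake_getcwd.assert_called_once_with()

    # patch.dict() temporarily updates a mapping and restores it afterwards.
    with patch.dict(os.environ, {'BOARD': 'eve'}):
        assert os.environ['BOARD'] == 'eve'

    # mock_open() fakes the builtin open(); read() returns the canned data.
    m = mock_open(read_data='hello')
    with patch('__builtin__.open', m):
        with open('/etc/lsb-release') as f:
            assert f.read() == 'hello'
    m.assert_any_call('/etc/lsb-release')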
diff --git a/utils/frozen_chromite/third_party/oauth2client/LICENSE b/utils/frozen_chromite/third_party/oauth2client/LICENSE
new file mode 100644
index 0000000..b506d50
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/LICENSE
@@ -0,0 +1,22 @@
+ Copyright 2014 Google Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+Dependent Modules
+=================
+
+This code has the following dependencies
+above and beyond the Python standard library:
+
+uritemplates - Apache License 2.0
+httplib2 - MIT License
diff --git a/utils/frozen_chromite/third_party/oauth2client/README.md b/utils/frozen_chromite/third_party/oauth2client/README.md
new file mode 100644
index 0000000..005aff5
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/README.md
@@ -0,0 +1,29 @@
+[![Build Status](https://travis-ci.org/google/oauth2client.svg?branch=master)](https://travis-ci.org/google/oauth2client)
+[![Coverage Status](https://coveralls.io/repos/google/oauth2client/badge.svg?branch=master&service=github)](https://coveralls.io/github/google/oauth2client?branch=master)
+[![Documentation Status](https://readthedocs.org/projects/oauth2client/badge/?version=latest)](http://oauth2client.readthedocs.org/)
+
+This is a client library for accessing resources protected by OAuth 2.0.
+
+Installation
+============
+
+To install, simply say
+
+```bash
+$ pip install --upgrade oauth2client
+```
+
+Contributing
+============
+
+Please see the [CONTRIBUTING page][1] for more information. In particular, we
+love pull requests -- but please make sure to sign the contributor license
+agreement.
+
+Supported Python Versions
+=========================
+
+We support Python 2.6, 2.7, 3.3+. More information [in the docs][2].
+
+[1]: https://github.com/google/oauth2client/blob/master/CONTRIBUTING.md
+[2]: http://oauth2client.readthedocs.org/#supported-python-versions
diff --git a/utils/frozen_chromite/third_party/oauth2client/README.swarming b/utils/frozen_chromite/third_party/oauth2client/README.swarming
new file mode 100644
index 0000000..9193d48
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/README.swarming
@@ -0,0 +1,14 @@
+Name: oauth2client
+Short Name: oauth2client
+URL: https://github.com/google/oauth2client/archive/v1.5.2.tar.gz
+Version: 1.5.2
+Revision: 73d9d55447de97dfe541395817a0c8241701f7d6
+License: Apache License, Version 2.0
+
+Description:
+oauth2client is a client library for OAuth 2.0.
+
+Local Modifications:
+- Kept oauth2client/.
+- Removed: appengine.py devshell.py django_orm.py flask_util.py
+- Kept LICENSE and README.md.
diff --git a/utils/frozen_chromite/third_party/oauth2client/__init__.py b/utils/frozen_chromite/third_party/oauth2client/__init__.py
new file mode 100644
index 0000000..f7c36c1
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/__init__.py
@@ -0,0 +1,23 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client library for using OAuth2, especially with Google APIs."""
+
+__version__ = '1.5.2'
+
+GOOGLE_AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
+GOOGLE_DEVICE_URI = 'https://accounts.google.com/o/oauth2/device/code'
+GOOGLE_REVOKE_URI = 'https://accounts.google.com/o/oauth2/revoke'
+GOOGLE_TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
+GOOGLE_TOKEN_INFO_URI = 'https://www.googleapis.com/oauth2/v2/tokeninfo'
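A hedged sketch of what these endpoint constants are typically used for:
composing an OAuth 2.0 authorization URL for the out-of-band installed-app
flow. The client id and scope are placeholders, and the import assumes the
vendored package is importable as oauth2client.

    import urllib

    from oauth2client import GOOGLE_AUTH_URI

    params = urllib.urlencode({
        'response_type': 'code',
        'client_id': 'example.apps.googleusercontent.com',  # placeholder
        'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',  # out-of-band flow
        'scope': 'https://www.googleapis.com/auth/userinfo.email',  # placeholder
    })
    auth_url = '%s?%s' % (GOOGLE_AUTH_URI, params)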
diff --git a/utils/frozen_chromite/third_party/oauth2client/_helpers.py b/utils/frozen_chromite/third_party/oauth2client/_helpers.py
new file mode 100644
index 0000000..39bfeb6
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/_helpers.py
@@ -0,0 +1,103 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Helper functions for commonly used utilities."""
+
+import base64
+import json
+import six
+
+
+def _parse_pem_key(raw_key_input):
+    """Identify and extract PEM keys.
+
+    Determines whether the given key is in the format of PEM key, and extracts
+    the relevant part of the key if it is.
+
+    Args:
+        raw_key_input: The contents of a private key file (either PEM or
+                       PKCS12).
+
+    Returns:
+        string, The actual key if the contents are from a PEM file, or
+        else None.
+    """
+    offset = raw_key_input.find(b'-----BEGIN ')
+    if offset != -1:
+        return raw_key_input[offset:]
+
+
+def _json_encode(data):
+    return json.dumps(data, separators=(',', ':'))
+
+
+def _to_bytes(value, encoding='ascii'):
+    """Converts a string value to bytes, if necessary.
+
+    Unfortunately, ``six.b`` is insufficient for this task since in
+    Python2 it does not modify ``unicode`` objects.
+
+    Args:
+        value: The string/bytes value to be converted.
+        encoding: The encoding to use to convert unicode to bytes. Defaults
+                  to "ascii", which will not allow any characters from ordinals
+                  larger than 127. Other useful values are "latin-1", which
+                  will only allow byte ordinals (up to 255), and "utf-8",
+                  which will encode any unicode that needs to be.
+
+    Returns:
+        The original value converted to bytes (if unicode) or as passed in
+        if it started out as bytes.
+
+    Raises:
+        ValueError if the value could not be converted to bytes.
+    """
+    result = (value.encode(encoding)
+              if isinstance(value, six.text_type) else value)
+    if isinstance(result, six.binary_type):
+        return result
+    else:
+        raise ValueError('%r could not be converted to bytes' % (value,))
+
+
+def _from_bytes(value):
+    """Converts bytes to a string value, if necessary.
+
+    Args:
+        value: The string/bytes value to be converted.
+
+    Returns:
+        The original value converted to unicode (if bytes) or as passed in
+        if it started out as unicode.
+
+    Raises:
+        ValueError if the value could not be converted to unicode.
+    """
+    result = (value.decode('utf-8')
+              if isinstance(value, six.binary_type) else value)
+    if isinstance(result, six.text_type):
+        return result
+    else:
+        raise ValueError('%r could not be converted to unicode' % (value,))
+
+
+def _urlsafe_b64encode(raw_bytes):
+    raw_bytes = _to_bytes(raw_bytes, encoding='utf-8')
+    return base64.urlsafe_b64encode(raw_bytes).rstrip(b'=')
+
+
+def _urlsafe_b64decode(b64string):
+    # Guard against unicode strings, which base64 can't handle.
+    b64string = _to_bytes(b64string)
+    padded = b64string + b'=' * (4 - len(b64string) % 4)
+    return base64.urlsafe_b64decode(padded)
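A quick sketch of how the byte/unicode helpers above round-trip (Python 2;
the import assumes the vendored package is importable as oauth2client):

    from oauth2client._helpers import (_from_bytes, _to_bytes,
                                       _urlsafe_b64decode, _urlsafe_b64encode)

    data = _to_bytes(u'token-payload', encoding='utf-8')  # unicode -> bytes
    token = _urlsafe_b64encode(data)                      # '=' padding stripped
    assert _urlsafe_b64decode(token) == data              # padding restored
    assert _from_bytes(data) == u'token-payload'          # bytes -> unicode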
diff --git a/utils/frozen_chromite/third_party/oauth2client/_openssl_crypt.py b/utils/frozen_chromite/third_party/oauth2client/_openssl_crypt.py
new file mode 100644
index 0000000..d024cf3
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/_openssl_crypt.py
@@ -0,0 +1,139 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""OpenSSL Crypto-related routines for oauth2client."""
+
+import base64
+
+from OpenSSL import crypto
+
+from oauth2client._helpers import _parse_pem_key
+from oauth2client._helpers import _to_bytes
+
+
+class OpenSSLVerifier(object):
+    """Verifies the signature on a message."""
+
+    def __init__(self, pubkey):
+        """Constructor.
+
+        Args:
+            pubkey: OpenSSL.crypto.PKey, The public key to verify with.
+        """
+        self._pubkey = pubkey
+
+    def verify(self, message, signature):
+        """Verifies a message against a signature.
+
+        Args:
+            message: string or bytes, The message to verify. If string, will be
+                     encoded to bytes as utf-8.
+            signature: string or bytes, The signature on the message. If string,
+                       will be encoded to bytes as utf-8.
+
+        Returns:
+            True if message was signed by the private key associated with the
+            public key that this object was constructed with.
+        """
+        message = _to_bytes(message, encoding='utf-8')
+        signature = _to_bytes(signature, encoding='utf-8')
+        try:
+            crypto.verify(self._pubkey, signature, message, 'sha256')
+            return True
+        except crypto.Error:
+            return False
+
+    @staticmethod
+    def from_string(key_pem, is_x509_cert):
+        """Construct a Verified instance from a string.
+
+        Args:
+            key_pem: string, public key in PEM format.
+            is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
+                          is expected to be an RSA key in PEM format.
+
+        Returns:
+            Verifier instance.
+
+        Raises:
+            OpenSSL.crypto.Error: if the key_pem can't be parsed.
+        """
+        if is_x509_cert:
+            pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
+        else:
+            pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
+        return OpenSSLVerifier(pubkey)
+
+
+class OpenSSLSigner(object):
+    """Signs messages with a private key."""
+
+    def __init__(self, pkey):
+        """Constructor.
+
+        Args:
+            pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
+        """
+        self._key = pkey
+
+    def sign(self, message):
+        """Signs a message.
+
+        Args:
+            message: bytes, Message to be signed.
+
+        Returns:
+            string, The signature of the message for the given key.
+        """
+        message = _to_bytes(message, encoding='utf-8')
+        return crypto.sign(self._key, message, 'sha256')
+
+    @staticmethod
+    def from_string(key, password=b'notasecret'):
+        """Construct a Signer instance from a string.
+
+        Args:
+            key: string, private key in PKCS12 or PEM format.
+            password: string, password for the private key file.
+
+        Returns:
+            Signer instance.
+
+        Raises:
+            OpenSSL.crypto.Error if the key can't be parsed.
+        """
+        parsed_pem_key = _parse_pem_key(key)
+        if parsed_pem_key:
+            pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, parsed_pem_key)
+        else:
+            password = _to_bytes(password, encoding='utf-8')
+            pkey = crypto.load_pkcs12(key, password).get_privatekey()
+        return OpenSSLSigner(pkey)
+
+
+def pkcs12_key_as_pem(private_key_text, private_key_password):
+    """Convert the contents of a PKCS12 key to PEM using OpenSSL.
+
+    Args:
+        private_key_text: String. Private key.
+        private_key_password: String. Password for PKCS12.
+
+    Returns:
+        String. PEM contents of ``private_key_text``.
+    """
+    decoded_body = base64.b64decode(private_key_text)
+    private_key_password = _to_bytes(private_key_password)
+
+    pkcs12 = crypto.load_pkcs12(decoded_body, private_key_password)
+    return crypto.dump_privatekey(crypto.FILETYPE_PEM,
+                                  pkcs12.get_privatekey())
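A sketch of the sign/verify round trip these classes provide, assuming
pyOpenSSL is installed; key.pem and cert.pem stand in for a hypothetical RSA
private key and its matching X509 certificate.

    from oauth2client._openssl_crypt import OpenSSLSigner, OpenSSLVerifier

    with open('key.pem') as f:    # hypothetical private key (PEM)
        private_pem = f.read()
    with open('cert.pem') as f:   # hypothetical matching X509 certificate
        cert_pem = f.read()

    signer = OpenSSLSigner.from_string(private_pem)
    signature = signer.sign('payload-to-sign')

    verifier = OpenSSLVerifier.from_string(cert_pem, is_x509_cert=True)
    assert verifier.verify('payload-to-sign', signature)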
diff --git a/utils/frozen_chromite/third_party/oauth2client/_pycrypto_crypt.py b/utils/frozen_chromite/third_party/oauth2client/_pycrypto_crypt.py
new file mode 100644
index 0000000..7b277aa
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/_pycrypto_crypt.py
@@ -0,0 +1,128 @@
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""pyCrypto Crypto-related routines for oauth2client."""
+
+from Crypto.PublicKey import RSA
+from Crypto.Hash import SHA256
+from Crypto.Signature import PKCS1_v1_5
+from Crypto.Util.asn1 import DerSequence
+
+from oauth2client._helpers import _parse_pem_key
+from oauth2client._helpers import _to_bytes
+from oauth2client._helpers import _urlsafe_b64decode
+
+
+class PyCryptoVerifier(object):
+    """Verifies the signature on a message."""
+
+    def __init__(self, pubkey):
+        """Constructor.
+
+        Args:
+            pubkey: OpenSSL.crypto.PKey (or equiv), The public key to verify
+            with.
+        """
+        self._pubkey = pubkey
+
+    def verify(self, message, signature):
+        """Verifies a message against a signature.
+
+        Args:
+            message: string or bytes, The message to verify. If string, will be
+                     encoded to bytes as utf-8.
+            signature: string or bytes, The signature on the message.
+
+        Returns:
+            True if message was signed by the private key associated with the
+            public key that this object was constructed with.
+        """
+        message = _to_bytes(message, encoding='utf-8')
+        return PKCS1_v1_5.new(self._pubkey).verify(
+            SHA256.new(message), signature)
+
+    @staticmethod
+    def from_string(key_pem, is_x509_cert):
+        """Construct a Verified instance from a string.
+
+        Args:
+            key_pem: string, public key in PEM format.
+            is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
+                          is expected to be an RSA key in PEM format.
+
+        Returns:
+            Verifier instance.
+        """
+        if is_x509_cert:
+            key_pem = _to_bytes(key_pem)
+            pemLines = key_pem.replace(b' ', b'').split()
+            certDer = _urlsafe_b64decode(b''.join(pemLines[1:-1]))
+            certSeq = DerSequence()
+            certSeq.decode(certDer)
+            tbsSeq = DerSequence()
+            tbsSeq.decode(certSeq[0])
+            pubkey = RSA.importKey(tbsSeq[6])
+        else:
+            pubkey = RSA.importKey(key_pem)
+        return PyCryptoVerifier(pubkey)
+
+
+class PyCryptoSigner(object):
+    """Signs messages with a private key."""
+
+    def __init__(self, pkey):
+        """Constructor.
+
+        Args:
+            pkey: OpenSSL.crypto.PKey (or equiv), The private key to sign with.
+        """
+        self._key = pkey
+
+    def sign(self, message):
+        """Signs a message.
+
+        Args:
+            message: string, Message to be signed.
+
+        Returns:
+            string, The signature of the message for the given key.
+        """
+        message = _to_bytes(message, encoding='utf-8')
+        return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))
+
+    @staticmethod
+    def from_string(key, password='notasecret'):
+        """Construct a Signer instance from a string.
+
+        Args:
+            key: string, private key in PEM format.
+            password: string, password for private key file. Unused for PEM
+                      files.
+
+        Returns:
+            Signer instance.
+
+        Raises:
+            NotImplementedError if the key isn't in PEM format.
+        """
+        parsed_pem_key = _parse_pem_key(key)
+        if parsed_pem_key:
+            pkey = RSA.importKey(parsed_pem_key)
+        else:
+            raise NotImplementedError(
+                'PKCS12 format is not supported by the PyCrypto library. '
+                'Try converting to a "PEM" '
+                '(openssl pkcs12 -in xxxxx.p12 -nodes -nocerts > '
+                'privatekey.pem) '
+                'or using PyOpenSSL if native code is an option.')
+        return PyCryptoSigner(pkey)
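The PyCrypto backend mirrors the OpenSSL one. A sketch, assuming PyCrypto
(Crypto.*) is installed and key.pem is a hypothetical RSA private key in PEM
format:

    from Crypto.PublicKey import RSA

    from oauth2client._pycrypto_crypt import PyCryptoSigner, PyCryptoVerifier

    with open('key.pem') as f:    # hypothetical private key (PEM)
        private_pem = f.read()

    signer = PyCryptoSigner.from_string(private_pem)
    signature = signer.sign('payload-to-sign')

    # Derive the public half with PyCrypto and hand it to the verifier.
    public_pem = RSA.importKey(private_pem).publickey().exportKey()
    verifier = PyCryptoVerifier.from_string(public_pem, is_x509_cert=False)
    assert verifier.verify('payload-to-sign', signature)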
diff --git a/utils/frozen_chromite/third_party/oauth2client/client.py b/utils/frozen_chromite/third_party/oauth2client/client.py
new file mode 100644
index 0000000..0bfe004
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/client.py
@@ -0,0 +1,2243 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""An OAuth 2.0 client.
+
+Tools for interacting with OAuth 2.0 protected resources.
+"""
+
+import base64
+import collections
+import copy
+import datetime
+import json
+import logging
+import os
+import socket
+import sys
+import tempfile
+import time
+import shutil
+import six
+from six.moves import urllib
+
+import httplib2
+from oauth2client import GOOGLE_AUTH_URI
+from oauth2client import GOOGLE_DEVICE_URI
+from oauth2client import GOOGLE_REVOKE_URI
+from oauth2client import GOOGLE_TOKEN_URI
+from oauth2client import GOOGLE_TOKEN_INFO_URI
+from oauth2client._helpers import _from_bytes
+from oauth2client._helpers import _to_bytes
+from oauth2client._helpers import _urlsafe_b64decode
+from oauth2client import clientsecrets
+from oauth2client import util
+
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+HAS_OPENSSL = False
+HAS_CRYPTO = False
+try:
+    from oauth2client import crypt
+    HAS_CRYPTO = True
+    if crypt.OpenSSLVerifier is not None:
+        HAS_OPENSSL = True
+except ImportError:
+    pass
+
+
+logger = logging.getLogger(__name__)
+
+# Expiry is stored in RFC3339 UTC format
+EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
+
+# Which certs to use to validate id_tokens received.
+ID_TOKEN_VERIFICATION_CERTS = 'https://www.googleapis.com/oauth2/v1/certs'
+# This symbol previously had a typo in the name; we keep the old name
+# around for now, but will remove it in the future.
+ID_TOKEN_VERIFICATON_CERTS = ID_TOKEN_VERIFICATION_CERTS
+
+# Constant to use for the out of band OAuth 2.0 flow.
+OOB_CALLBACK_URN = 'urn:ietf:wg:oauth:2.0:oob'
+
+# Google Data client libraries may need to set this to [401, 403].
+REFRESH_STATUS_CODES = [401]
+
+# The value representing user credentials.
+AUTHORIZED_USER = 'authorized_user'
+
+# The value representing service account credentials.
+SERVICE_ACCOUNT = 'service_account'
+
+# The environment variable pointing the file with local
+# Application Default Credentials.
+GOOGLE_APPLICATION_CREDENTIALS = 'GOOGLE_APPLICATION_CREDENTIALS'
+# The ~/.config subdirectory containing gcloud credentials. Intended
+# to be swapped out in tests.
+_CLOUDSDK_CONFIG_DIRECTORY = 'gcloud'
+# The environment variable name which can replace ~/.config if set.
+_CLOUDSDK_CONFIG_ENV_VAR = 'CLOUDSDK_CONFIG'
+
+# The error message we show users when we can't find the Application
+# Default Credentials.
+ADC_HELP_MSG = (
+    'The Application Default Credentials are not available. They are '
+    'available if running in Google Compute Engine. Otherwise, the '
+    'environment variable ' +
+    GOOGLE_APPLICATION_CREDENTIALS +
+    ' must be defined pointing to a file defining the credentials. See '
+    'https://developers.google.com/accounts/docs/'
+    'application-default-credentials for more information.')
+
+# The access token along with the seconds in which it expires.
+AccessTokenInfo = collections.namedtuple(
+    'AccessTokenInfo', ['access_token', 'expires_in'])
+
+DEFAULT_ENV_NAME = 'UNKNOWN'
+
+# If set to True _get_environment avoid GCE check (_detect_gce_environment)
+NO_GCE_CHECK = os.environ.setdefault('NO_GCE_CHECK', 'False')
+
+_SERVER_SOFTWARE = 'SERVER_SOFTWARE'
+_GCE_METADATA_HOST = '169.254.169.254'
+_METADATA_FLAVOR_HEADER = 'Metadata-Flavor'
+_DESIRED_METADATA_FLAVOR = 'Google'
+
+
+class SETTINGS(object):
+    """Settings namespace for globally defined values."""
+    env_name = None
+
+
+class Error(Exception):
+    """Base error for this module."""
+
+
+class FlowExchangeError(Error):
+    """Error trying to exchange an authorization grant for an access token."""
+
+
+class AccessTokenRefreshError(Error):
+    """Error trying to refresh an expired access token."""
+
+
+class HttpAccessTokenRefreshError(AccessTokenRefreshError):
+    """Error (with HTTP status) trying to refresh an expired access token."""
+    def __init__(self, *args, **kwargs):
+        super(HttpAccessTokenRefreshError, self).__init__(*args)
+        self.status = kwargs.get('status')
+
+
+class TokenRevokeError(Error):
+    """Error trying to revoke a token."""
+
+
+class UnknownClientSecretsFlowError(Error):
+    """The client secrets file called for an unknown type of OAuth 2.0 flow."""
+
+
+class AccessTokenCredentialsError(Error):
+    """Having only the access_token means no refresh is possible."""
+
+
+class VerifyJwtTokenError(Error):
+    """Could not retrieve certificates for validation."""
+
+
+class NonAsciiHeaderError(Error):
+    """Header names and values must be ASCII strings."""
+
+
+class ApplicationDefaultCredentialsError(Error):
+    """Error retrieving the Application Default Credentials."""
+
+
+class OAuth2DeviceCodeError(Error):
+    """Error trying to retrieve a device code."""
+
+
+class CryptoUnavailableError(Error, NotImplementedError):
+    """Raised when a crypto library is required, but none is available."""
+
+
+def _abstract():
+    raise NotImplementedError('You need to override this function')
+
+
+class MemoryCache(object):
+    """httplib2 Cache implementation which only caches locally."""
+
+    def __init__(self):
+        self.cache = {}
+
+    def get(self, key):
+        return self.cache.get(key)
+
+    def set(self, key, value):
+        self.cache[key] = value
+
+    def delete(self, key):
+        self.cache.pop(key, None)
+
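+# Illustrative usage (not part of upstream oauth2client): MemoryCache can be
+# handed to httplib2 as a per-process cache, e.g.:
+#
+#     cache = MemoryCache()
+#     http = httplib2.Http(cache=cache)
+#     cache.set('key', b'value')
+#     assert cache.get('key') == b'value'
+#     cache.delete('key')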
+
+class Credentials(object):
+    """Base class for all Credentials objects.
+
+    Subclasses must define an authorize() method that applies the credentials
+    to an HTTP transport.
+
+    Subclasses must also specify a classmethod named 'from_json' that takes a
+    JSON string as input and returns an instantiated Credentials object.
+    """
+
+    NON_SERIALIZED_MEMBERS = ['store']
+
+    def authorize(self, http):
+        """Take an httplib2.Http instance (or equivalent) and authorizes it.
+
+        Authorizes it for the set of credentials, usually by replacing
+        http.request() with a method that adds in the appropriate headers and
+        then delegates to the original Http.request() method.
+
+        Args:
+            http: httplib2.Http, an http object to be used to make the refresh
+                  request.
+        """
+        _abstract()
+
+    def refresh(self, http):
+        """Forces a refresh of the access_token.
+
+        Args:
+            http: httplib2.Http, an http object to be used to make the refresh
+                  request.
+        """
+        _abstract()
+
+    def revoke(self, http):
+        """Revokes a refresh_token and makes the credentials void.
+
+        Args:
+            http: httplib2.Http, an http object to be used to make the revoke
+                  request.
+        """
+        _abstract()
+
+    def apply(self, headers):
+        """Add the authorization to the headers.
+
+        Args:
+            headers: dict, the headers to add the Authorization header to.
+        """
+        _abstract()
+
+    def _to_json(self, strip):
+        """Utility function that creates JSON repr. of a Credentials object.
+
+        Args:
+            strip: array, An array of names of members to not include in the
+                   JSON.
+
+        Returns:
+            string, a JSON representation of this instance, suitable to pass to
+            from_json().
+        """
+        t = type(self)
+        d = copy.copy(self.__dict__)
+        for member in strip:
+            if member in d:
+                del d[member]
+        if (d.get('token_expiry') and
+                isinstance(d['token_expiry'], datetime.datetime)):
+            d['token_expiry'] = d['token_expiry'].strftime(EXPIRY_FORMAT)
+        # Add in information we will need later to reconstitute this instance.
+        d['_class'] = t.__name__
+        d['_module'] = t.__module__
+        for key, val in d.items():
+            if isinstance(val, bytes):
+                d[key] = val.decode('utf-8')
+            if isinstance(val, set):
+                d[key] = list(val)
+        return json.dumps(d)
+
+    def to_json(self):
+        """Creating a JSON representation of an instance of Credentials.
+
+        Returns:
+            string, a JSON representation of this instance, suitable to pass to
+            from_json().
+        """
+        return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
+
+    @classmethod
+    def new_from_json(cls, s):
+        """Utility class method to instantiate a Credentials subclass from JSON.
+
+        Expects the JSON string to have been produced by to_json().
+
+        Args:
+            s: string or bytes, JSON from to_json().
+
+        Returns:
+            An instance of the subclass of Credentials that was serialized with
+            to_json().
+        """
+        json_string_as_unicode = _from_bytes(s)
+        data = json.loads(json_string_as_unicode)
+        # Find and call the right classmethod from_json() to restore
+        # the object.
+        module_name = data['_module']
+        try:
+            module_obj = __import__(module_name)
+        except ImportError:
+            # In case there's an object from the old package structure,
+            # update it
+            module_name = module_name.replace('.googleapiclient', '')
+            module_obj = __import__(module_name)
+
+        module_obj = __import__(module_name,
+                                fromlist=module_name.split('.')[:-1])
+        kls = getattr(module_obj, data['_class'])
+        from_json = getattr(kls, 'from_json')
+        return from_json(json_string_as_unicode)
+
+    @classmethod
+    def from_json(cls, unused_data):
+        """Instantiate a Credentials object from a JSON description of it.
+
+        The JSON should have been produced by calling .to_json() on the object.
+
+        Args:
+            unused_data: dict, A deserialized JSON object.
+
+        Returns:
+            An instance of a Credentials subclass.
+        """
+        return Credentials()
+
+
+class Flow(object):
+    """Base class for all Flow objects."""
+    pass
+
+
+class Storage(object):
+    """Base class for all Storage objects.
+
+    Store and retrieve a single credential. This class supports locking
+    such that multiple processes and threads can operate on a single
+    store.
+    """
+
+    def acquire_lock(self):
+        """Acquires any lock necessary to access this Storage.
+
+        This lock is not reentrant.
+        """
+        pass
+
+    def release_lock(self):
+        """Release the Storage lock.
+
+        Trying to release a lock that isn't held will result in a
+        RuntimeError.
+        """
+        pass
+
+    def locked_get(self):
+        """Retrieve credential.
+
+        The Storage lock must be held when this is called.
+
+        Returns:
+            oauth2client.client.Credentials
+        """
+        _abstract()
+
+    def locked_put(self, credentials):
+        """Write a credential.
+
+        The Storage lock must be held when this is called.
+
+        Args:
+            credentials: Credentials, the credentials to store.
+        """
+        _abstract()
+
+    def locked_delete(self):
+        """Delete a credential.
+
+        The Storage lock must be held when this is called.
+        """
+        _abstract()
+
+    def get(self):
+        """Retrieve credential.
+
+        The Storage lock must *not* be held when this is called.
+
+        Returns:
+            oauth2client.client.Credentials
+        """
+        self.acquire_lock()
+        try:
+            return self.locked_get()
+        finally:
+            self.release_lock()
+
+    def put(self, credentials):
+        """Write a credential.
+
+        The Storage lock must *not* be held when this is called.
+
+        Args:
+            credentials: Credentials, the credentials to store.
+        """
+        self.acquire_lock()
+        try:
+            self.locked_put(credentials)
+        finally:
+            self.release_lock()
+
+    def delete(self):
+        """Delete credential.
+
+        Frees any resources associated with storing the credential.
+        The Storage lock must *not* be held when this is called.
+
+        Returns:
+            None
+        """
+        self.acquire_lock()
+        try:
+            return self.locked_delete()
+        finally:
+            self.release_lock()
+
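+# Illustrative note (not part of upstream oauth2client): a Storage subclass
+# only needs to implement locked_get/locked_put/locked_delete (and, if it is
+# shared between threads or processes, acquire_lock/release_lock); the get(),
+# put() and delete() methods above wrap those with the locking protocol.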
+
+def clean_headers(headers):
+    """Forces header keys and values to be strings, i.e not unicode.
+
+    The httplib module just concats the header keys and values in a way that
+    may make the message header a unicode string, which, if it then tries to
+    contatenate to a binary request body may result in a unicode decode error.
+
+    Args:
+        headers: dict, A dictionary of headers.
+
+    Returns:
+        The same dictionary but with all the keys converted to strings.
+    """
+    clean = {}
+    try:
+        for k, v in six.iteritems(headers):
+            if not isinstance(k, six.binary_type):
+                k = str(k)
+            if not isinstance(v, six.binary_type):
+                v = str(v)
+            clean[_to_bytes(k)] = _to_bytes(v)
+    except UnicodeEncodeError:
+        raise NonAsciiHeaderError(k, ': ', v)
+    return clean
+
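+# Illustrative example (not part of upstream oauth2client): clean_headers()
+# coerces both keys and values to bytestrings, so e.g.
+#
+#     clean_headers({u'Content-Type': u'text/plain', 'X-Retries': 3})
+#
+# yields {b'Content-Type': b'text/plain', b'X-Retries': b'3'}, and a header
+# containing non-ASCII text raises NonAsciiHeaderError.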
+
+def _update_query_params(uri, params):
+    """Updates a URI with new query parameters.
+
+    Args:
+        uri: string, A valid URI, with potential existing query parameters.
+        params: dict, A dictionary of query parameters.
+
+    Returns:
+        The same URI but with the new query parameters added.
+    """
+    parts = urllib.parse.urlparse(uri)
+    query_params = dict(urllib.parse.parse_qsl(parts.query))
+    query_params.update(params)
+    new_parts = parts._replace(query=urllib.parse.urlencode(query_params))
+    return urllib.parse.urlunparse(new_parts)
+
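+# Illustrative example (not part of upstream oauth2client): existing query
+# parameters are preserved and the new ones merged in, e.g.
+#
+#     _update_query_params('https://example.com/revoke?foo=1',
+#                          {'token': 'abc'})
+#
+# returns 'https://example.com/revoke?foo=1&token=abc' (parameter order may
+# vary, since the query is rebuilt from a dict).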
+
+class OAuth2Credentials(Credentials):
+    """Credentials object for OAuth 2.0.
+
+    Credentials can be applied to an httplib2.Http object using the authorize()
+    method, which then adds the OAuth 2.0 access token to each request.
+
+    OAuth2Credentials objects may be safely pickled and unpickled.
+    """
+
+    @util.positional(8)
+    def __init__(self, access_token, client_id, client_secret, refresh_token,
+                 token_expiry, token_uri, user_agent, revoke_uri=None,
+                 id_token=None, token_response=None, scopes=None,
+                 token_info_uri=None):
+        """Create an instance of OAuth2Credentials.
+
+        This constructor is not usually called by the user, instead
+        OAuth2Credentials objects are instantiated by the OAuth2WebServerFlow.
+
+        Args:
+            access_token: string, access token.
+            client_id: string, client identifier.
+            client_secret: string, client secret.
+            refresh_token: string, refresh token.
+            token_expiry: datetime, when the access_token expires.
+            token_uri: string, URI of token endpoint.
+            user_agent: string, The HTTP User-Agent to provide for this
+                        application.
+            revoke_uri: string, URI for revoke endpoint. Defaults to None; a
+                        token can't be revoked if this is None.
+            id_token: object, The identity of the resource owner.
+            token_response: dict, the decoded response to the token request.
+                            None if a token hasn't been requested yet. Stored
+                            because some providers (e.g. wordpress.com) include
+                            extra fields that clients may want.
+            scopes: list, authorized scopes for these credentials.
+            token_info_uri: string, the URI for the token info endpoint.
+                            Defaults to None; scopes cannot be refreshed if
+                            this is None.
+
+        Notes:
+            store: callable, A callable that when passed a Credential
+                   will store the credential back to where it came from.
+                   This is needed to store the latest access_token if it
+                   has expired and been refreshed.
+        """
+        self.access_token = access_token
+        self.client_id = client_id
+        self.client_secret = client_secret
+        self.refresh_token = refresh_token
+        self.store = None
+        self.token_expiry = token_expiry
+        self.token_uri = token_uri
+        self.user_agent = user_agent
+        self.revoke_uri = revoke_uri
+        self.id_token = id_token
+        self.token_response = token_response
+        self.scopes = set(util.string_to_scopes(scopes or []))
+        self.token_info_uri = token_info_uri
+
+        # True if the credentials have been revoked or expired and can't be
+        # refreshed.
+        self.invalid = False
+
+    def authorize(self, http):
+        """Authorize an httplib2.Http instance with these credentials.
+
+        The modified http.request method will add authentication headers to
+        each request and will refresh access_tokens when a 401 is received on a
+        request. In addition the http.request method has a credentials
+        property, http.request.credentials, which is the Credentials object
+        that authorized it.
+
+        Args:
+            http: An instance of ``httplib2.Http`` or something that acts
+                  like it.
+
+        Returns:
+            A modified instance of http that was passed in.
+
+        Example::
+
+            h = httplib2.Http()
+            h = credentials.authorize(h)
+
+        You can't create a new OAuth subclass of httplib2.Authentication
+        because it never gets passed the absolute URI, which is needed for
+        signing. So instead we have to overload 'request' with a closure
+        that adds in the Authorization header and then calls the original
+        version of 'request()'.
+        """
+        request_orig = http.request
+
+        # The closure that will replace 'httplib2.Http.request'.
+        def new_request(uri, method='GET', body=None, headers=None,
+                        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
+                        connection_type=None):
+            if not self.access_token:
+                logger.info('Attempting refresh to obtain '
+                            'initial access_token')
+                self._refresh(request_orig)
+
+            # Clone and modify the request headers to add the appropriate
+            # Authorization header.
+            if headers is None:
+                headers = {}
+            else:
+                headers = dict(headers)
+            self.apply(headers)
+
+            if self.user_agent is not None:
+                if 'user-agent' in headers:
+                    headers['user-agent'] = (self.user_agent + ' ' +
+                                             headers['user-agent'])
+                else:
+                    headers['user-agent'] = self.user_agent
+
+            body_stream_position = None
+            if all(getattr(body, stream_prop, None) for stream_prop in
+                   ('read', 'seek', 'tell')):
+                body_stream_position = body.tell()
+
+            resp, content = request_orig(uri, method, body,
+                                         clean_headers(headers),
+                                         redirections, connection_type)
+
+            # A stored token may expire between the time it is retrieved and
+            # the time the request is made, so we may need to try twice.
+            max_refresh_attempts = 2
+            for refresh_attempt in range(max_refresh_attempts):
+                if resp.status not in REFRESH_STATUS_CODES:
+                    break
+                logger.info(
+                    'OAuth token TTL expired, auto-refreshing (attempt %s/%s)',
+                    refresh_attempt + 1,
+                    max_refresh_attempts)
+                self._refresh(request_orig)
+                self.apply(headers)
+                if body_stream_position is not None:
+                    body.seek(body_stream_position)
+
+                resp, content = request_orig(uri, method, body,
+                                             clean_headers(headers),
+                                             redirections, connection_type)
+
+            return (resp, content)
+
+        # Replace the request method with our own closure.
+        http.request = new_request
+
+        # Set credentials as a property of the request method.
+        setattr(http.request, 'credentials', self)
+
+        return http
+
+    def refresh(self, http):
+        """Forces a refresh of the access_token.
+
+        Args:
+            http: httplib2.Http, an http object to be used to make the refresh
+                  request.
+        """
+        self._refresh(http.request)
+
+    def revoke(self, http):
+        """Revokes a refresh_token and makes the credentials void.
+
+        Args:
+            http: httplib2.Http, an http object to be used to make the revoke
+                  request.
+        """
+        self._revoke(http.request)
+
+    def apply(self, headers):
+        """Add the authorization to the headers.
+
+        Args:
+            headers: dict, the headers to add the Authorization header to.
+        """
+        headers['Authorization'] = 'Bearer ' + self.access_token
+
+    def has_scopes(self, scopes):
+        """Verify that the credentials are authorized for the given scopes.
+
+        Returns True if the credentials authorized scopes contain all of the
+        scopes given.
+
+        Args:
+            scopes: list or string, the scopes to check.
+
+        Notes:
+            There are cases where the credentials are unaware of which scopes
+            are authorized. Notably, credentials obtained and stored before
+            this code was added will not have scopes, and
+            AccessTokenCredentials do not have scopes. In both cases, you can
+            use retrieve_scopes() to obtain the canonical set of scopes.
+        """
+        scopes = util.string_to_scopes(scopes)
+        return set(scopes).issubset(self.scopes)
+
+    def retrieve_scopes(self, http):
+        """Retrieves the canonical list of scopes for this access token.
+
+        Gets the scopes from the OAuth2 provider.
+
+        Args:
+            http: httplib2.Http, an http object to be used to make the refresh
+                  request.
+
+        Returns:
+            A set of strings containing the canonical list of scopes.
+        """
+        self._retrieve_scopes(http.request)
+        return self.scopes
+
+    def to_json(self):
+        return self._to_json(Credentials.NON_SERIALIZED_MEMBERS)
+
+    @classmethod
+    def from_json(cls, s):
+        """Instantiate a Credentials object from a JSON description of it.
+
+        The JSON should have been produced by calling .to_json() on the object.
+
+        Args:
+            s: string or bytes, JSON from to_json().
+
+        Returns:
+            An instance of a Credentials subclass.
+        """
+        s = _from_bytes(s)
+        data = json.loads(s)
+        if (data.get('token_expiry') and
+                not isinstance(data['token_expiry'], datetime.datetime)):
+            try:
+                data['token_expiry'] = datetime.datetime.strptime(
+                    data['token_expiry'], EXPIRY_FORMAT)
+            except ValueError:
+                data['token_expiry'] = None
+        retval = cls(
+            data['access_token'],
+            data['client_id'],
+            data['client_secret'],
+            data['refresh_token'],
+            data['token_expiry'],
+            data['token_uri'],
+            data['user_agent'],
+            revoke_uri=data.get('revoke_uri', None),
+            id_token=data.get('id_token', None),
+            token_response=data.get('token_response', None),
+            scopes=data.get('scopes', None),
+            token_info_uri=data.get('token_info_uri', None))
+        retval.invalid = data['invalid']
+        return retval
+
+    @property
+    def access_token_expired(self):
+        """True if the credential is expired or invalid.
+
+        If the token_expiry isn't set, we assume the token doesn't expire.
+        """
+        if self.invalid:
+            return True
+
+        if not self.token_expiry:
+            return False
+
+        now = datetime.datetime.utcnow()
+        if now >= self.token_expiry:
+            logger.info('access_token is expired. Now: %s, token_expiry: %s',
+                        now, self.token_expiry)
+            return True
+        return False
+
+    def get_access_token(self, http=None):
+        """Return the access token and its expiration information.
+
+        If the token does not exist, get one.
+        If the token expired, refresh it.
+        """
+        if not self.access_token or self.access_token_expired:
+            if not http:
+                http = httplib2.Http()
+            self.refresh(http)
+        return AccessTokenInfo(access_token=self.access_token,
+                               expires_in=self._expires_in())
+
+    def set_store(self, store):
+        """Set the Storage for the credential.
+
+        Args:
+            store: Storage, an implementation of Storage object.
+                   This is needed to store the latest access_token if it
+                   has expired and been refreshed. This implementation uses
+                   locking to check for updates before updating the
+                   access_token.
+        """
+        self.store = store
+
+    def _expires_in(self):
+        """Return the number of seconds until this token expires.
+
+        If token_expiry is in the past, this method will return 0, meaning the
+        token has already expired.
+
+        If token_expiry is None, this method will return None. Note that
+        returning 0 in such a case would not be fair: the token may still be
+        valid; we just don't know anything about it.
+        """
+        if self.token_expiry:
+            now = datetime.datetime.utcnow()
+            if self.token_expiry > now:
+                time_delta = self.token_expiry - now
+                # TODO(orestica): return time_delta.total_seconds()
+                # once dropping support for Python 2.6
+                return time_delta.days * 86400 + time_delta.seconds
+            else:
+                return 0
+
+    def _updateFromCredential(self, other):
+        """Update this Credential from another instance."""
+        self.__dict__.update(other.__getstate__())
+
+    def __getstate__(self):
+        """Trim the state down to something that can be pickled."""
+        d = copy.copy(self.__dict__)
+        del d['store']
+        return d
+
+    def __setstate__(self, state):
+        """Reconstitute the state of the object from being pickled."""
+        self.__dict__.update(state)
+        self.store = None
+
+    def _generate_refresh_request_body(self):
+        """Generate the body that will be used in the refresh request."""
+        body = urllib.parse.urlencode({
+            'grant_type': 'refresh_token',
+            'client_id': self.client_id,
+            'client_secret': self.client_secret,
+            'refresh_token': self.refresh_token,
+        })
+        return body
+
+    def _generate_refresh_request_headers(self):
+        """Generate the headers that will be used in the refresh request."""
+        headers = {
+            'content-type': 'application/x-www-form-urlencoded',
+        }
+
+        if self.user_agent is not None:
+            headers['user-agent'] = self.user_agent
+
+        return headers
+
+    def _refresh(self, http_request):
+        """Refreshes the access_token.
+
+        This method first checks for an updated token by reading the Storage
+        object, if one is available. If a refresh is still needed, it holds
+        the Storage lock until the refresh is completed.
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make the
+                          refresh request.
+
+        Raises:
+            HttpAccessTokenRefreshError: When the refresh fails.
+        """
+        if not self.store:
+            self._do_refresh_request(http_request)
+        else:
+            self.store.acquire_lock()
+            try:
+                new_cred = self.store.locked_get()
+
+                if (new_cred and not new_cred.invalid and
+                        new_cred.access_token != self.access_token and
+                        not new_cred.access_token_expired):
+                    logger.info('Updated access_token read from Storage')
+                    self._updateFromCredential(new_cred)
+                else:
+                    self._do_refresh_request(http_request)
+            finally:
+                self.store.release_lock()
+
+    def _do_refresh_request(self, http_request):
+        """Refresh the access_token using the refresh_token.
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make the
+                          refresh request.
+
+        Raises:
+            HttpAccessTokenRefreshError: When the refresh fails.
+        """
+        body = self._generate_refresh_request_body()
+        headers = self._generate_refresh_request_headers()
+
+        logger.info('Refreshing access_token')
+        resp, content = http_request(
+            self.token_uri, method='POST', body=body, headers=headers)
+        content = _from_bytes(content)
+        if resp.status == 200:
+            d = json.loads(content)
+            self.token_response = d
+            self.access_token = d['access_token']
+            self.refresh_token = d.get('refresh_token', self.refresh_token)
+            if 'expires_in' in d:
+                self.token_expiry = datetime.timedelta(
+                    seconds=int(d['expires_in'])) + datetime.datetime.utcnow()
+            else:
+                self.token_expiry = None
+            # On temporary refresh errors, the user does not actually have to
+            # re-authorize, so we unflag here.
+            self.invalid = False
+            if self.store:
+                self.store.locked_put(self)
+        else:
+            # An {'error':...} response body means the token is expired or
+            # revoked, so we flag the credentials as such.
+            logger.info('Failed to retrieve access token: %s', content)
+            error_msg = 'Invalid response %s.' % resp['status']
+            try:
+                d = json.loads(content)
+                if 'error' in d:
+                    error_msg = d['error']
+                    if 'error_description' in d:
+                        error_msg += ': ' + d['error_description']
+                    self.invalid = True
+                    if self.store:
+                        self.store.locked_put(self)
+            except (TypeError, ValueError):
+                pass
+            raise HttpAccessTokenRefreshError(error_msg, status=resp.status)
+
+    def _revoke(self, http_request):
+        """Revokes this credential and deletes the stored copy (if it exists).
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make the
+                          revoke request.
+        """
+        self._do_revoke(http_request, self.refresh_token or self.access_token)
+
+    def _do_revoke(self, http_request, token):
+        """Revokes this credential and deletes the stored copy (if it exists).
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make the
+                          refresh request.
+            token: A string used as the token to be revoked. Can be either an
+                   access_token or refresh_token.
+
+        Raises:
+            TokenRevokeError: If the revoke request does not return with a
+                              200 OK.
+        """
+        logger.info('Revoking token')
+        query_params = {'token': token}
+        token_revoke_uri = _update_query_params(self.revoke_uri, query_params)
+        resp, content = http_request(token_revoke_uri)
+        if resp.status == 200:
+            self.invalid = True
+        else:
+            error_msg = 'Invalid response %s.' % resp.status
+            try:
+                d = json.loads(_from_bytes(content))
+                if 'error' in d:
+                    error_msg = d['error']
+            except (TypeError, ValueError):
+                pass
+            raise TokenRevokeError(error_msg)
+
+        if self.store:
+            self.store.delete()
+
+    def _retrieve_scopes(self, http_request):
+        """Retrieves the list of authorized scopes from the OAuth2 provider.
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make the
+                          revoke request.
+        """
+        self._do_retrieve_scopes(http_request, self.access_token)
+
+    def _do_retrieve_scopes(self, http_request, token):
+        """Retrieves the list of authorized scopes from the OAuth2 provider.
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make the
+                          refresh request.
+            token: A string used as the token to identify the credentials to
+                   the provider.
+
+        Raises:
+            Error: When refresh fails, indicating the access token is
+                   invalid.
+        """
+        logger.info('Refreshing scopes')
+        query_params = {'access_token': token, 'fields': 'scope'}
+        token_info_uri = _update_query_params(self.token_info_uri,
+                                              query_params)
+        resp, content = http_request(token_info_uri)
+        content = _from_bytes(content)
+        if resp.status == 200:
+            d = json.loads(content)
+            self.scopes = set(util.string_to_scopes(d.get('scope', '')))
+        else:
+            error_msg = 'Invalid response %s.' % (resp.status,)
+            try:
+                d = json.loads(content)
+                if 'error_description' in d:
+                    error_msg = d['error_description']
+            except (TypeError, ValueError):
+                pass
+            raise Error(error_msg)
+
+
+class AccessTokenCredentials(OAuth2Credentials):
+    """Credentials object for OAuth 2.0.
+
+    Credentials can be applied to an httplib2.Http object using the
+    authorize() method, which then signs each request from that object
+    with the OAuth 2.0 access token. This set of credentials is for the
+    use case where you have acquired an OAuth 2.0 access_token from
+    another place such as a JavaScript client or another web
+    application, and wish to use it from Python. Because only the
+    access_token is present it can not be refreshed and will in time
+    expire.
+
+    AccessTokenCredentials objects may be safely pickled and unpickled.
+
+    Usage::
+
+        credentials = AccessTokenCredentials('<an access token>',
+            'my-user-agent/1.0')
+        http = httplib2.Http()
+        http = credentials.authorize(http)
+
+    Raises:
+        AccessTokenCredentialsError: raised when the access_token expires or
+                                     is revoked.
+    """
+
+    def __init__(self, access_token, user_agent, revoke_uri=None):
+        """Create an instance of OAuth2Credentials
+
+        This is one of the few types if Credentials that you should contrust,
+        Credentials objects are usually instantiated by a Flow.
+
+        Args:
+            access_token: string, access token.
+            user_agent: string, The HTTP User-Agent to provide for this
+                        application.
+            revoke_uri: string, URI for revoke endpoint. Defaults to None; a
+                        token can't be revoked if this is None.
+        """
+        super(AccessTokenCredentials, self).__init__(
+            access_token,
+            None,
+            None,
+            None,
+            None,
+            None,
+            user_agent,
+            revoke_uri=revoke_uri)
+
+    @classmethod
+    def from_json(cls, s):
+        data = json.loads(_from_bytes(s))
+        retval = AccessTokenCredentials(
+            data['access_token'],
+            data['user_agent'])
+        return retval
+
+    def _refresh(self, http_request):
+        raise AccessTokenCredentialsError(
+            'The access_token is expired or invalid and can\'t be refreshed.')
+
+    def _revoke(self, http_request):
+        """Revokes the access_token and deletes the store if available.
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make the
+                          revoke request.
+        """
+        self._do_revoke(http_request, self.access_token)
+
+
+def _detect_gce_environment():
+    """Determine if the current environment is Compute Engine.
+
+    Returns:
+        Boolean indicating whether or not the current environment is Google
+        Compute Engine.
+    """
+    # NOTE: The explicit ``timeout`` is a workaround. The underlying
+    #       issue is that resolving an unknown host on some networks will take
+    #       20-30 seconds; making this timeout short fixes the issue, but
+    #       could lead to false negatives in the event that we are on GCE, but
+    #       the metadata resolution was particularly slow. The latter case is
+    #       "unlikely".
+    connection = six.moves.http_client.HTTPConnection(
+        _GCE_METADATA_HOST, timeout=1)
+
+    try:
+        headers = {_METADATA_FLAVOR_HEADER: _DESIRED_METADATA_FLAVOR}
+        connection.request('GET', '/', headers=headers)
+        response = connection.getresponse()
+        if response.status == 200:
+            return (response.getheader(_METADATA_FLAVOR_HEADER) ==
+                    _DESIRED_METADATA_FLAVOR)
+    except socket.error:  # socket.timeout or socket.error(64, 'Host is down')
+        logger.info('Timeout attempting to reach GCE metadata service.')
+        return False
+    finally:
+        connection.close()
+
+
+def _in_gae_environment():
+    """Detects if the code is running in the App Engine environment.
+
+    Returns:
+        True if running in the GAE environment, False otherwise.
+    """
+    if SETTINGS.env_name is not None:
+        return SETTINGS.env_name in ('GAE_PRODUCTION', 'GAE_LOCAL')
+
+    try:
+        import google.appengine  # noqa: unused import
+    except ImportError:
+        pass
+    else:
+        server_software = os.environ.get(_SERVER_SOFTWARE, '')
+        if server_software.startswith('Google App Engine/'):
+            SETTINGS.env_name = 'GAE_PRODUCTION'
+            return True
+        elif server_software.startswith('Development/'):
+            SETTINGS.env_name = 'GAE_LOCAL'
+            return True
+
+    return False
+
+
+def _in_gce_environment():
+    """Detect if the code is running in the Compute Engine environment.
+
+    Returns:
+        True if running in the GCE environment, False otherwise.
+    """
+    if SETTINGS.env_name is not None:
+        return SETTINGS.env_name == 'GCE_PRODUCTION'
+
+    if NO_GCE_CHECK != 'True' and _detect_gce_environment():
+        SETTINGS.env_name = 'GCE_PRODUCTION'
+        return True
+    return False
+
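+# Note (not part of upstream oauth2client): setting the environment variable
+# NO_GCE_CHECK=True before this module is imported makes _in_gce_environment()
+# skip the metadata-server probe entirely, which can be useful on networks
+# where that probe is slow to time out.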
+
+class GoogleCredentials(OAuth2Credentials):
+    """Application Default Credentials for use in calling Google APIs.
+
+    The Application Default Credentials are constructed as a function of the
+    environment in which the code is running.
+    More details can be found on this page:
+    https://developers.google.com/accounts/docs/application-default-credentials
+
+    Here is an example of how to use the Application Default Credentials for a
+    service that requires authentication::
+
+        from googleapiclient.discovery import build
+        from oauth2client.client import GoogleCredentials
+
+        credentials = GoogleCredentials.get_application_default()
+        service = build('compute', 'v1', credentials=credentials)
+
+        PROJECT = 'bamboo-machine-422'
+        ZONE = 'us-central1-a'
+        request = service.instances().list(project=PROJECT, zone=ZONE)
+        response = request.execute()
+
+        print(response)
+    """
+
+    def __init__(self, access_token, client_id, client_secret, refresh_token,
+                 token_expiry, token_uri, user_agent,
+                 revoke_uri=GOOGLE_REVOKE_URI):
+        """Create an instance of GoogleCredentials.
+
+        This constructor is not usually called by the user, instead
+        GoogleCredentials objects are instantiated by
+        GoogleCredentials.from_stream() or
+        GoogleCredentials.get_application_default().
+
+        Args:
+            access_token: string, access token.
+            client_id: string, client identifier.
+            client_secret: string, client secret.
+            refresh_token: string, refresh token.
+            token_expiry: datetime, when the access_token expires.
+            token_uri: string, URI of token endpoint.
+            user_agent: string, The HTTP User-Agent to provide for this
+                        application.
+            revoke_uri: string, URI for revoke endpoint. Defaults to
+                        GOOGLE_REVOKE_URI; a token can't be revoked if this
+                        is None.
+        """
+        super(GoogleCredentials, self).__init__(
+            access_token, client_id, client_secret, refresh_token,
+            token_expiry, token_uri, user_agent, revoke_uri=revoke_uri)
+
+    def create_scoped_required(self):
+        """Whether this Credentials object is scopeless.
+
+        If it is, the create_scoped(scopes) method must be called in order to
+        create a Credentials object suitable for API calls.
+        """
+        return False
+
+    def create_scoped(self, scopes):
+        """Create a Credentials object for the given scopes.
+
+        The Credentials type is preserved.
+        """
+        return self
+
+    @property
+    def serialization_data(self):
+        """Get the fields and values identifying the current credentials."""
+        return {
+            'type': 'authorized_user',
+            'client_id': self.client_id,
+            'client_secret': self.client_secret,
+            'refresh_token': self.refresh_token
+        }
+
+    @staticmethod
+    def _implicit_credentials_from_gae():
+        """Attempts to get implicit credentials in Google App Engine env.
+
+        If the current environment is not detected as App Engine, returns None,
+        indicating no Google App Engine credentials can be detected from the
+        current environment.
+
+        Returns:
+            None, if not in GAE, else an appengine.AppAssertionCredentials
+            object.
+        """
+        if not _in_gae_environment():
+            return None
+
+        return _get_application_default_credential_GAE()
+
+    @staticmethod
+    def _implicit_credentials_from_gce():
+        """Attempts to get implicit credentials in Google Compute Engine env.
+
+        If the current environment is not detected as Compute Engine, returns
+        None, indicating no Google Compute Engine credentials can be detected
+        from the current environment.
+
+        Returns:
+            None, if not in GCE, else a gce.AppAssertionCredentials object.
+        """
+        if not _in_gce_environment():
+            return None
+
+        return _get_application_default_credential_GCE()
+
+    @staticmethod
+    def _implicit_credentials_from_files():
+        """Attempts to get implicit credentials from local credential files.
+
+        First checks if the environment variable GOOGLE_APPLICATION_CREDENTIALS
+        is set with a filename and then falls back to a configuration file (the
+        "well known" file) associated with the 'gcloud' command line tool.
+
+        Returns:
+            Credentials object associated with the
+            GOOGLE_APPLICATION_CREDENTIALS file or the "well known" file if
+            either exists. If neither file is found, returns None, indicating
+            that no credentials can be detected from a file in the current
+            environment.
+        """
+        credentials_filename = _get_environment_variable_file()
+        if not credentials_filename:
+            credentials_filename = _get_well_known_file()
+            if os.path.isfile(credentials_filename):
+                extra_help = (' (produced automatically when running'
+                              ' "gcloud auth login" command)')
+            else:
+                credentials_filename = None
+        else:
+            extra_help = (' (pointed to by ' + GOOGLE_APPLICATION_CREDENTIALS +
+                          ' environment variable)')
+
+        if not credentials_filename:
+            return
+
+        # If we can read the credentials from a file, we don't need to know
+        # what environment we are in.
+        SETTINGS.env_name = DEFAULT_ENV_NAME
+
+        try:
+            return _get_application_default_credential_from_file(
+                credentials_filename)
+        except (ApplicationDefaultCredentialsError, ValueError) as error:
+            _raise_exception_for_reading_json(credentials_filename,
+                                              extra_help, error)
+
+    @classmethod
+    def _get_implicit_credentials(cls):
+        """Gets credentials implicitly from the environment.
+
+        Checks environment in order of precedence:
+        - Google App Engine (production and testing)
+        - Environment variable GOOGLE_APPLICATION_CREDENTIALS pointing to
+          a file with stored credentials information.
+        - Stored "well known" file associated with `gcloud` command line tool.
+        - Google Compute Engine production environment.
+
+        Raises:
+            ApplicationDefaultCredentialsError: raised when the credentials
+                                                fail to be retrieved.
+        """
+        # Environ checks (in order).
+        environ_checkers = [
+            cls._implicit_credentials_from_gae,
+            cls._implicit_credentials_from_files,
+            cls._implicit_credentials_from_gce,
+        ]
+
+        for checker in environ_checkers:
+            credentials = checker()
+            if credentials is not None:
+                return credentials
+
+        # If no credentials, fail.
+        raise ApplicationDefaultCredentialsError(ADC_HELP_MSG)
+
+    @staticmethod
+    def get_application_default():
+        """Get the Application Default Credentials for the current environment.
+
+        Raises:
+            ApplicationDefaultCredentialsError: raised when the credentials
+                                                fail to be retrieved.
+        """
+        return GoogleCredentials._get_implicit_credentials()
+
+    @staticmethod
+    def from_stream(credential_filename):
+        """Create a Credentials object by reading information from a file.
+
+        It returns an object of type GoogleCredentials.
+
+        Args:
+            credential_filename: the path to the file from where the
+                                 credentials are to be read
+
+        Raises:
+            ApplicationDefaultCredentialsError: raised when the credentials
+                                                fail to be retrieved.
+        """
+        if credential_filename and os.path.isfile(credential_filename):
+            try:
+                return _get_application_default_credential_from_file(
+                    credential_filename)
+            except (ApplicationDefaultCredentialsError, ValueError) as error:
+                extra_help = (' (provided as parameter to the '
+                              'from_stream() method)')
+                _raise_exception_for_reading_json(credential_filename,
+                                                  extra_help,
+                                                  error)
+        else:
+            raise ApplicationDefaultCredentialsError(
+                'The parameter passed to the from_stream() '
+                'method should point to a file.')
+
+
+def _save_private_file(filename, json_contents):
+    """Saves a file with read-write permissions on for the owner.
+
+    Args:
+        filename: String. Absolute path to file.
+        json_contents: JSON serializable object to be saved.
+    """
+    temp_filename = tempfile.mktemp()
+    file_desc = os.open(temp_filename, os.O_WRONLY | os.O_CREAT, 0o600)
+    with os.fdopen(file_desc, 'w') as file_handle:
+        json.dump(json_contents, file_handle, sort_keys=True,
+                  indent=2, separators=(',', ': '))
+    shutil.move(temp_filename, filename)
+
+
+def save_to_well_known_file(credentials, well_known_file=None):
+    """Save the provided GoogleCredentials to the well known file.
+
+    Args:
+        credentials: the credentials to be saved to the well known file;
+                     it should be an instance of GoogleCredentials
+        well_known_file: the name of the file where the credentials are to be
+                         saved; this parameter is supposed to be used for
+                         testing only
+    """
+    # TODO(orestica): move this method to tools.py
+    # once the argparse import gets fixed (it is not present in Python 2.6)
+
+    if well_known_file is None:
+        well_known_file = _get_well_known_file()
+
+    config_dir = os.path.dirname(well_known_file)
+    if not os.path.isdir(config_dir):
+        raise OSError('Config directory does not exist: %s' % config_dir)
+
+    credentials_data = credentials.serialization_data
+    _save_private_file(well_known_file, credentials_data)
+
+
+def _get_environment_variable_file():
+    application_default_credential_filename = (
+        os.environ.get(GOOGLE_APPLICATION_CREDENTIALS, None))
+
+    if application_default_credential_filename:
+        if os.path.isfile(application_default_credential_filename):
+            return application_default_credential_filename
+        else:
+            raise ApplicationDefaultCredentialsError(
+                'File ' + application_default_credential_filename +
+                ' (pointed to by ' +
+                GOOGLE_APPLICATION_CREDENTIALS +
+                ' environment variable) does not exist!')
+
+
+def _get_well_known_file():
+    """Get the well known file produced by command 'gcloud auth login'."""
+    # TODO(orestica): Revisit this method once gcloud provides a better way
+    # of pinpointing the exact location of the file.
+
+    WELL_KNOWN_CREDENTIALS_FILE = 'application_default_credentials.json'
+
+    default_config_dir = os.getenv(_CLOUDSDK_CONFIG_ENV_VAR)
+    if default_config_dir is None:
+        if os.name == 'nt':
+            try:
+                default_config_dir = os.path.join(os.environ['APPDATA'],
+                                                  _CLOUDSDK_CONFIG_DIRECTORY)
+            except KeyError:
+                # This should never happen unless someone is really
+                # messing with things.
+                drive = os.environ.get('SystemDrive', 'C:')
+                default_config_dir = os.path.join(drive, '\\',
+                                                  _CLOUDSDK_CONFIG_DIRECTORY)
+        else:
+            default_config_dir = os.path.join(os.path.expanduser('~'),
+                                              '.config',
+                                              _CLOUDSDK_CONFIG_DIRECTORY)
+
+    return os.path.join(default_config_dir, WELL_KNOWN_CREDENTIALS_FILE)
+
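+# Illustrative note (not part of upstream oauth2client): with neither
+# CLOUDSDK_CONFIG nor the Windows APPDATA fallbacks in play, this resolves to
+# ~/.config/gcloud/application_default_credentials.json on POSIX systems and
+# %APPDATA%\gcloud\application_default_credentials.json on Windows.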
+
+def _get_application_default_credential_from_file(filename):
+    """Build the Application Default Credentials from file."""
+
+    from oauth2client import service_account
+
+    # read the credentials from the file
+    with open(filename) as file_obj:
+        client_credentials = json.load(file_obj)
+
+    credentials_type = client_credentials.get('type')
+    if credentials_type == AUTHORIZED_USER:
+        required_fields = set(['client_id', 'client_secret', 'refresh_token'])
+    elif credentials_type == SERVICE_ACCOUNT:
+        required_fields = set(['client_id', 'client_email', 'private_key_id',
+                               'private_key'])
+    else:
+        raise ApplicationDefaultCredentialsError(
+            "'type' field should be defined (and have one of the '" +
+            AUTHORIZED_USER + "' or '" + SERVICE_ACCOUNT + "' values)")
+
+    missing_fields = required_fields.difference(client_credentials.keys())
+
+    if missing_fields:
+        _raise_exception_for_missing_fields(missing_fields)
+
+    if client_credentials['type'] == AUTHORIZED_USER:
+        return GoogleCredentials(
+            access_token=None,
+            client_id=client_credentials['client_id'],
+            client_secret=client_credentials['client_secret'],
+            refresh_token=client_credentials['refresh_token'],
+            token_expiry=None,
+            token_uri=GOOGLE_TOKEN_URI,
+            user_agent='Python client library')
+    else:  # client_credentials['type'] == SERVICE_ACCOUNT
+        return service_account._ServiceAccountCredentials(
+            service_account_id=client_credentials['client_id'],
+            service_account_email=client_credentials['client_email'],
+            private_key_id=client_credentials['private_key_id'],
+            private_key_pkcs8_text=client_credentials['private_key'],
+            scopes=[])
+
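+# Illustrative example (not part of upstream oauth2client): an
+# "authorized_user" credentials file read by the function above is JSON of
+# the form
+#
+#     {
+#       "type": "authorized_user",
+#       "client_id": "...",
+#       "client_secret": "...",
+#       "refresh_token": "..."
+#     }
+#
+# while a "service_account" file must instead carry client_id, client_email,
+# private_key_id and private_key.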
+
+def _raise_exception_for_missing_fields(missing_fields):
+    raise ApplicationDefaultCredentialsError(
+        'The following field(s) must be defined: ' + ', '.join(missing_fields))
+
+
+def _raise_exception_for_reading_json(credential_file,
+                                      extra_help,
+                                      error):
+    raise ApplicationDefaultCredentialsError(
+        'An error was encountered while reading json file: ' +
+        credential_file + extra_help + ': ' + str(error))
+
+
+def _get_application_default_credential_GAE():
+    from oauth2client.appengine import AppAssertionCredentials
+
+    return AppAssertionCredentials([])
+
+
+def _get_application_default_credential_GCE():
+    from oauth2client.gce import AppAssertionCredentials
+
+    return AppAssertionCredentials([])
+
+
+class AssertionCredentials(GoogleCredentials):
+    """Abstract Credentials object used for OAuth 2.0 assertion grants.
+
+    This credential does not require a flow to instantiate because it
+    represents a two legged flow, and therefore has all of the required
+    information to generate and refresh its own access tokens. It must
+    be subclassed to generate the appropriate assertion string.
+
+    AssertionCredentials objects may be safely pickled and unpickled.
+    """
+
+    @util.positional(2)
+    def __init__(self, assertion_type, user_agent=None,
+                 token_uri=GOOGLE_TOKEN_URI,
+                 revoke_uri=GOOGLE_REVOKE_URI,
+                 **unused_kwargs):
+        """Constructor for AssertionFlowCredentials.
+
+        Args:
+            assertion_type: string, assertion type that will be declared to the
+                            auth server
+            user_agent: string, The HTTP User-Agent to provide for this
+                        application.
+            token_uri: string, URI for token endpoint. For convenience defaults
+                       to Google's endpoints but any OAuth 2.0 provider can be
+                       used.
+            revoke_uri: string, URI for revoke endpoint.
+        """
+        super(AssertionCredentials, self).__init__(
+            None,
+            None,
+            None,
+            None,
+            None,
+            token_uri,
+            user_agent,
+            revoke_uri=revoke_uri)
+        self.assertion_type = assertion_type
+
+    def _generate_refresh_request_body(self):
+        assertion = self._generate_assertion()
+
+        body = urllib.parse.urlencode({
+            'assertion': assertion,
+            'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
+        })
+
+        return body
+
+    def _generate_assertion(self):
+        """Generate assertion string to be used in the access token request."""
+        _abstract()
+
+    def _revoke(self, http_request):
+        """Revokes the access_token and deletes the store if available.
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make the
+                          revoke request.
+        """
+        self._do_revoke(http_request, self.access_token)
+
+
+def _RequireCryptoOrDie():
+    """Ensure we have a crypto library, or throw CryptoUnavailableError.
+
+    The oauth2client.crypt module requires either PyCrypto or PyOpenSSL
+    to be available in order to function, but these are optional
+    dependencies.
+    """
+    if not HAS_CRYPTO:
+        raise CryptoUnavailableError('No crypto library available')
+
+
+class SignedJwtAssertionCredentials(AssertionCredentials):
+    """Credentials object used for OAuth 2.0 Signed JWT assertion grants.
+
+    This credential does not require a flow to instantiate because it
+    represents a two legged flow, and therefore has all of the required
+    information to generate and refresh its own access tokens.
+
+    SignedJwtAssertionCredentials requires either PyOpenSSL, or PyCrypto
+    2.6 or later. For App Engine you may also consider using
+    AppAssertionCredentials.
+    """
+
+    MAX_TOKEN_LIFETIME_SECS = 3600  # 1 hour in seconds
+
+    @util.positional(4)
+    def __init__(self,
+                 service_account_name,
+                 private_key,
+                 scope,
+                 private_key_password='notasecret',
+                 user_agent=None,
+                 token_uri=GOOGLE_TOKEN_URI,
+                 revoke_uri=GOOGLE_REVOKE_URI,
+                 **kwargs):
+        """Constructor for SignedJwtAssertionCredentials.
+
+        Args:
+            service_account_name: string, id for account, usually an email
+                                  address.
+            private_key: string or bytes, private key in PKCS12 or PEM format.
+            scope: string or iterable of strings, scope(s) of the credentials
+                   being requested.
+            private_key_password: string, password for private_key, unused if
+                                  private_key is in PEM format.
+            user_agent: string, HTTP User-Agent to provide for this
+                        application.
+            token_uri: string, URI for token endpoint. For convenience defaults
+                       to Google's endpoints but any OAuth 2.0 provider can be
+                       used.
+            revoke_uri: string, URI for revoke endpoint.
+            kwargs: kwargs, Additional parameters to add to the JWT token, for
+                    example [email protected].
+
+        Raises:
+            CryptoUnavailableError if no crypto library is available.
+        """
+        _RequireCryptoOrDie()
+        super(SignedJwtAssertionCredentials, self).__init__(
+            None,
+            user_agent=user_agent,
+            token_uri=token_uri,
+            revoke_uri=revoke_uri,
+        )
+
+        self.scope = util.scopes_to_string(scope)
+
+        # Keep base64 encoded so it can be stored in JSON.
+        self.private_key = base64.b64encode(_to_bytes(private_key))
+        self.private_key_password = private_key_password
+        self.service_account_name = service_account_name
+        self.kwargs = kwargs
+
+    @classmethod
+    def from_json(cls, s):
+        data = json.loads(_from_bytes(s))
+        retval = SignedJwtAssertionCredentials(
+            data['service_account_name'],
+            base64.b64decode(data['private_key']),
+            data['scope'],
+            private_key_password=data['private_key_password'],
+            user_agent=data['user_agent'],
+            token_uri=data['token_uri'],
+            **data['kwargs']
+        )
+        retval.invalid = data['invalid']
+        retval.access_token = data['access_token']
+        return retval
+
+    def _generate_assertion(self):
+        """Generate the assertion that will be used in the request."""
+        now = int(time.time())
+        payload = {
+            'aud': self.token_uri,
+            'scope': self.scope,
+            'iat': now,
+            'exp': now + SignedJwtAssertionCredentials.MAX_TOKEN_LIFETIME_SECS,
+            'iss': self.service_account_name
+        }
+        payload.update(self.kwargs)
+        logger.debug(str(payload))
+
+        private_key = base64.b64decode(self.private_key)
+        return crypt.make_signed_jwt(crypt.Signer.from_string(
+            private_key, self.private_key_password), payload)
+
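A minimal usage sketch for the class above (for reference only; the key path, service-account
email, and scope are placeholders, and imports use the upstream oauth2client names that this
vendored copy keeps internally)::

    import httplib2
    from oauth2client.client import SignedJwtAssertionCredentials

    # Placeholder path to the service account's private key (PEM or PKCS12).
    with open('/path/to/service-account.pem', 'rb') as f:
        private_key = f.read()

    credentials = SignedJwtAssertionCredentials(
        'my-sa@example-project.iam.gserviceaccount.com',   # placeholder account
        private_key,
        'https://www.googleapis.com/auth/devstorage.read_only')

    # authorize() wraps httplib2.Http so each request carries a valid token.
    http = credentials.authorize(httplib2.Http())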
+# Only used in verify_id_token(), which is always calling to the same URI
+# for the certs.
+_cached_http = httplib2.Http(MemoryCache())
+
+
+@util.positional(2)
+def verify_id_token(id_token, audience, http=None,
+                    cert_uri=ID_TOKEN_VERIFICATION_CERTS):
+    """Verifies a signed JWT id_token.
+
+    This function requires PyOpenSSL and because of that it does not work on
+    App Engine.
+
+    Args:
+        id_token: string, A Signed JWT.
+        audience: string, The audience 'aud' that the token should be for.
+        http: httplib2.Http, instance to use to make the HTTP request. Callers
+              should supply an instance that has caching enabled.
+        cert_uri: string, URI of the certificates in JSON format to
+                  verify the JWT against.
+
+    Returns:
+        The deserialized JSON in the JWT.
+
+    Raises:
+        oauth2client.crypt.AppIdentityError: if the JWT fails to verify.
+        CryptoUnavailableError: if no crypto library is available.
+    """
+    _RequireCryptoOrDie()
+    if http is None:
+        http = _cached_http
+
+    resp, content = http.request(cert_uri)
+    if resp.status == 200:
+        certs = json.loads(_from_bytes(content))
+        return crypt.verify_signed_jwt_with_certs(id_token, certs, audience)
+    else:
+        raise VerifyJwtTokenError('Status code: %d' % resp.status)
+
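A small sketch of verifying an ID token with the helper above; the token string and client ID
are assumed inputs, and verification failures surface as oauth2client.crypt.AppIdentityError::

    from oauth2client import client, crypt

    def check_id_token(id_token_string, client_id):
        # Verifies signature, audience, and time range against Google's certs.
        try:
            payload = client.verify_id_token(id_token_string, client_id)
            return payload['sub']  # stable user id in Google-issued tokens
        except crypt.AppIdentityError:
            return None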
+
+def _extract_id_token(id_token):
+    """Extract the JSON payload from a JWT.
+
+    Does the extraction w/o checking the signature.
+
+    Args:
+        id_token: string or bytestring, OAuth 2.0 id_token.
+
+    Returns:
+        object, The deserialized JSON payload.
+    """
+    if type(id_token) == bytes:
+        segments = id_token.split(b'.')
+    else:
+        segments = id_token.split(u'.')
+
+    if len(segments) != 3:
+        raise VerifyJwtTokenError(
+            'Wrong number of segments in token: %s' % id_token)
+
+    return json.loads(_from_bytes(_urlsafe_b64decode(segments[1])))
+
+
+def _parse_exchange_token_response(content):
+    """Parses response of an exchange token request.
+
+    Most providers return JSON but some (e.g. Facebook) return a
+    url-encoded string.
+
+    Args:
+        content: The body of a response
+
+    Returns:
+        Content as a dictionary object. Note that the dict could be empty,
+        i.e. {}, which indicates a failure.
+    """
+    resp = {}
+    content = _from_bytes(content)
+    try:
+        resp = json.loads(content)
+    except Exception:
+        # different JSON libs raise different exceptions,
+        # so we just do a catch-all here
+        resp = dict(urllib.parse.parse_qsl(content))
+
+    # some providers respond with 'expires', others with 'expires_in'
+    if resp and 'expires' in resp:
+        resp['expires_in'] = resp.pop('expires')
+
+    return resp
+
+
+@util.positional(4)
+def credentials_from_code(client_id, client_secret, scope, code,
+                          redirect_uri='postmessage', http=None,
+                          user_agent=None, token_uri=GOOGLE_TOKEN_URI,
+                          auth_uri=GOOGLE_AUTH_URI,
+                          revoke_uri=GOOGLE_REVOKE_URI,
+                          device_uri=GOOGLE_DEVICE_URI,
+                          token_info_uri=GOOGLE_TOKEN_INFO_URI):
+    """Exchanges an authorization code for an OAuth2Credentials object.
+
+    Args:
+        client_id: string, client identifier.
+        client_secret: string, client secret.
+        scope: string or iterable of strings, scope(s) to request.
+        code: string, An authorization code, most likely passed down from
+              the client
+        redirect_uri: string, this is generally set to 'postmessage' to match
+                      the redirect_uri that the client specified
+        http: httplib2.Http, optional http instance to use to do the fetch
+        token_uri: string, URI for token endpoint. For convenience defaults
+                   to Google's endpoints but any OAuth 2.0 provider can be
+                   used.
+        auth_uri: string, URI for authorization endpoint. For convenience
+                  defaults to Google's endpoints but any OAuth 2.0 provider
+                  can be used.
+        revoke_uri: string, URI for revoke endpoint. For convenience
+                    defaults to Google's endpoints but any OAuth 2.0 provider
+                    can be used.
+        device_uri: string, URI for device authorization endpoint. For
+                    convenience defaults to Google's endpoints but any OAuth
+                    2.0 provider can be used.
+
+    Returns:
+        An OAuth2Credentials object.
+
+    Raises:
+        FlowExchangeError if the authorization code cannot be exchanged for an
+        access token
+    """
+    flow = OAuth2WebServerFlow(client_id, client_secret, scope,
+                               redirect_uri=redirect_uri,
+                               user_agent=user_agent, auth_uri=auth_uri,
+                               token_uri=token_uri, revoke_uri=revoke_uri,
+                               device_uri=device_uri,
+                               token_info_uri=token_info_uri)
+
+    credentials = flow.step2_exchange(code, http=http)
+    return credentials
+
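A minimal sketch of exchanging an authorization code received from a client-side flow; the
client id, client secret, and scope shown are placeholders::

    from oauth2client import client

    def exchange_auth_code(auth_code):
        return client.credentials_from_code(
            '1234.apps.googleusercontent.com',   # placeholder client_id
            'client-secret',                     # placeholder client_secret
            'https://www.googleapis.com/auth/userinfo.email',
            auth_code)                           # code posted by the browser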
+
+@util.positional(3)
+def credentials_from_clientsecrets_and_code(filename, scope, code,
+                                            message=None,
+                                            redirect_uri='postmessage',
+                                            http=None,
+                                            cache=None,
+                                            device_uri=None):
+    """Returns OAuth2Credentials from a clientsecrets file and an auth code.
+
+    Will create the right kind of Flow based on the contents of the
+    clientsecrets file or will raise InvalidClientSecretsError for unknown
+    types of Flows.
+
+    Args:
+        filename: string, File name of clientsecrets.
+        scope: string or iterable of strings, scope(s) to request.
+        code: string, An authorization code, most likely passed down from
+              the client
+        message: string, A friendly string to display to the user if the
+                 clientsecrets file is missing or invalid. If message is
+                 provided then sys.exit will be called in the case of an error.
+                 If message is not provided then
+                 clientsecrets.InvalidClientSecretsError will be raised.
+        redirect_uri: string, this is generally set to 'postmessage' to match
+                      the redirect_uri that the client specified
+        http: httplib2.Http, optional http instance to use to do the fetch
+        cache: An optional cache service client that implements get() and set()
+               methods. See clientsecrets.loadfile() for details.
+        device_uri: string, OAuth 2.0 device authorization endpoint
+
+    Returns:
+        An OAuth2Credentials object.
+
+    Raises:
+        FlowExchangeError: if the authorization code cannot be exchanged for an
+                           access token
+        UnknownClientSecretsFlowError: if the file describes an unknown kind
+                                       of Flow.
+        clientsecrets.InvalidClientSecretsError: if the clientsecrets file is
+                                                 invalid.
+    """
+    flow = flow_from_clientsecrets(filename, scope, message=message,
+                                   cache=cache, redirect_uri=redirect_uri,
+                                   device_uri=device_uri)
+    credentials = flow.step2_exchange(code, http=http)
+    return credentials
+
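The clientsecrets-based variant of the same exchange; the secrets filename and scope below are
placeholders::

    from oauth2client import client

    def exchange_with_clientsecrets(auth_code):
        # Reads client_id/client_secret from the downloaded JSON file.
        return client.credentials_from_clientsecrets_and_code(
            'client_secrets.json',                             # placeholder path
            'https://www.googleapis.com/auth/userinfo.email',
            auth_code)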
+
+class DeviceFlowInfo(collections.namedtuple('DeviceFlowInfo', (
+        'device_code', 'user_code', 'interval', 'verification_url',
+        'user_code_expiry'))):
+    """Intermediate information the OAuth2 for devices flow."""
+
+    @classmethod
+    def FromResponse(cls, response):
+        """Create a DeviceFlowInfo from a server response.
+
+        The response should be a dict containing entries as described here:
+
+        http://tools.ietf.org/html/draft-ietf-oauth-v2-05#section-3.7.1
+        """
+        # device_code, user_code, and verification_url are required.
+        kwargs = {
+            'device_code': response['device_code'],
+            'user_code': response['user_code'],
+        }
+        # The response may list the verification address as either
+        # verification_url or verification_uri, so we check for both.
+        verification_url = response.get(
+            'verification_url', response.get('verification_uri'))
+        if verification_url is None:
+            raise OAuth2DeviceCodeError(
+                'No verification_url provided in server response')
+        kwargs['verification_url'] = verification_url
+        # expires_in and interval are optional.
+        kwargs.update({
+            'interval': response.get('interval'),
+            'user_code_expiry': None,
+        })
+        if 'expires_in' in response:
+            kwargs['user_code_expiry'] = (
+                datetime.datetime.now() +
+                datetime.timedelta(seconds=int(response['expires_in'])))
+        return cls(**kwargs)
+
+
+class OAuth2WebServerFlow(Flow):
+    """Does the Web Server Flow for OAuth 2.0.
+
+    OAuth2WebServerFlow objects may be safely pickled and unpickled.
+    """
+
+    @util.positional(4)
+    def __init__(self, client_id,
+                 client_secret=None,
+                 scope=None,
+                 redirect_uri=None,
+                 user_agent=None,
+                 auth_uri=GOOGLE_AUTH_URI,
+                 token_uri=GOOGLE_TOKEN_URI,
+                 revoke_uri=GOOGLE_REVOKE_URI,
+                 login_hint=None,
+                 device_uri=GOOGLE_DEVICE_URI,
+                 token_info_uri=GOOGLE_TOKEN_INFO_URI,
+                 authorization_header=None,
+                 **kwargs):
+        """Constructor for OAuth2WebServerFlow.
+
+        The kwargs argument is used to set extra query parameters on the
+        auth_uri. For example, the access_type and approval_prompt
+        query parameters can be set via kwargs.
+
+        Args:
+            client_id: string, client identifier.
+            client_secret: string client secret.
+            scope: string or iterable of strings, scope(s) of the credentials
+                   being requested.
+            redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
+                          for a non-web-based application, or a URI that
+                          handles the callback from the authorization server.
+            user_agent: string, HTTP User-Agent to provide for this
+                        application.
+            auth_uri: string, URI for authorization endpoint. For convenience
+                      defaults to Google's endpoints but any OAuth 2.0 provider
+                      can be used.
+            token_uri: string, URI for token endpoint. For convenience
+                       defaults to Google's endpoints but any OAuth 2.0
+                       provider can be used.
+            revoke_uri: string, URI for revoke endpoint. For convenience
+                        defaults to Google's endpoints but any OAuth 2.0
+                        provider can be used.
+            login_hint: string, Either an email address or domain. Passing this
+                        hint will either pre-fill the email box on the sign-in
+                        form or select the proper multi-login session, thereby
+                        simplifying the login flow.
+            device_uri: string, URI for device authorization endpoint. For
+                        convenience defaults to Google's endpoints but any
+                        OAuth 2.0 provider can be used.
+            authorization_header: string, For use with OAuth 2.0 providers that
+                                  require a client to authenticate using a
+                                  header value instead of passing client_secret
+                                  in the POST body.
+            **kwargs: dict, The keyword arguments are all optional and required
+                      parameters for the OAuth calls.
+        """
+        # scope is a required argument, but to preserve backwards-compatibility
+        # we don't want to rearrange the positional arguments
+        if scope is None:
+            raise TypeError("The value of scope must not be None")
+        self.client_id = client_id
+        self.client_secret = client_secret
+        self.scope = util.scopes_to_string(scope)
+        self.redirect_uri = redirect_uri
+        self.login_hint = login_hint
+        self.user_agent = user_agent
+        self.auth_uri = auth_uri
+        self.token_uri = token_uri
+        self.revoke_uri = revoke_uri
+        self.device_uri = device_uri
+        self.token_info_uri = token_info_uri
+        self.authorization_header = authorization_header
+        self.params = {
+            'access_type': 'offline',
+            'response_type': 'code',
+        }
+        self.params.update(kwargs)
+
+    @util.positional(1)
+    def step1_get_authorize_url(self, redirect_uri=None, state=None):
+        """Returns a URI to redirect to the provider.
+
+        Args:
+            redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob'
+                          for a non-web-based application, or a URI that
+                          handles the callback from the authorization server.
+                          This parameter is deprecated, please move to passing
+                          the redirect_uri in via the constructor.
+            state: string, Opaque state string which is passed through the
+                   OAuth2 flow and returned to the client as a query parameter
+                   in the callback.
+
+        Returns:
+            A URI as a string to redirect the user to begin the authorization
+            flow.
+        """
+        if redirect_uri is not None:
+            logger.warning((
+                'The redirect_uri parameter for '
+                'OAuth2WebServerFlow.step1_get_authorize_url is deprecated. '
+                'Please move to passing the redirect_uri in via the '
+                'constructor.'))
+            self.redirect_uri = redirect_uri
+
+        if self.redirect_uri is None:
+            raise ValueError('The value of redirect_uri must not be None.')
+
+        query_params = {
+            'client_id': self.client_id,
+            'redirect_uri': self.redirect_uri,
+            'scope': self.scope,
+        }
+        if state is not None:
+            query_params['state'] = state
+        if self.login_hint is not None:
+            query_params['login_hint'] = self.login_hint
+        query_params.update(self.params)
+        return _update_query_params(self.auth_uri, query_params)
+
+    @util.positional(1)
+    def step1_get_device_and_user_codes(self, http=None):
+        """Returns a user code and the verification URL where to enter it
+
+        Returns:
+            A user code as a string for the user to authorize the application
+            An URL as a string where the user has to enter the code
+        """
+        if self.device_uri is None:
+            raise ValueError('The value of device_uri must not be None.')
+
+        body = urllib.parse.urlencode({
+            'client_id': self.client_id,
+            'scope': self.scope,
+        })
+        headers = {
+            'content-type': 'application/x-www-form-urlencoded',
+        }
+
+        if self.user_agent is not None:
+            headers['user-agent'] = self.user_agent
+
+        if http is None:
+            http = httplib2.Http()
+
+        resp, content = http.request(self.device_uri, method='POST', body=body,
+                                     headers=headers)
+        content = _from_bytes(content)
+        if resp.status == 200:
+            try:
+                flow_info = json.loads(content)
+            except ValueError as e:
+                raise OAuth2DeviceCodeError(
+                    'Could not parse server response as JSON: "%s", '
+                    'error: "%s"' % (content, e))
+            return DeviceFlowInfo.FromResponse(flow_info)
+        else:
+            error_msg = 'Invalid response %s.' % resp.status
+            try:
+                d = json.loads(content)
+                if 'error' in d:
+                    error_msg += ' Error: %s' % d['error']
+            except ValueError:
+                # Couldn't decode a JSON response, stick with the
+                # default message.
+                pass
+            raise OAuth2DeviceCodeError(error_msg)
+
+    @util.positional(2)
+    def step2_exchange(self, code=None, http=None, device_flow_info=None):
+        """Exchanges a code for OAuth2Credentials.
+
+        Args:
+            code: string, a dict-like object, or None. For a non-device
+                  flow, this is either the response code as a string, or a
+                  dictionary of query parameters to the redirect_uri. For a
+                  device flow, this should be None.
+            http: httplib2.Http, optional http instance to use when fetching
+                  credentials.
+            device_flow_info: DeviceFlowInfo, return value from step1 in the
+                              case of a device flow.
+
+        Returns:
+            An OAuth2Credentials object that can be used to authorize requests.
+
+        Raises:
+            FlowExchangeError: if a problem occurred exchanging the code for a
+                               refresh_token.
+            ValueError: if code and device_flow_info are both provided or both
+                        missing.
+        """
+        if code is None and device_flow_info is None:
+            raise ValueError('No code or device_flow_info provided.')
+        if code is not None and device_flow_info is not None:
+            raise ValueError('Cannot provide both code and device_flow_info.')
+
+        if code is None:
+            code = device_flow_info.device_code
+        elif not isinstance(code, six.string_types):
+            if 'code' not in code:
+                raise FlowExchangeError(code.get(
+                    'error', 'No code was supplied in the query parameters.'))
+            code = code['code']
+
+        post_data = {
+            'client_id': self.client_id,
+            'code': code,
+            'scope': self.scope,
+        }
+        if self.client_secret is not None:
+            post_data['client_secret'] = self.client_secret
+        if device_flow_info is not None:
+            post_data['grant_type'] = 'http://oauth.net/grant_type/device/1.0'
+        else:
+            post_data['grant_type'] = 'authorization_code'
+            post_data['redirect_uri'] = self.redirect_uri
+        body = urllib.parse.urlencode(post_data)
+        headers = {
+            'content-type': 'application/x-www-form-urlencoded',
+        }
+        if self.authorization_header is not None:
+            headers['Authorization'] = self.authorization_header
+        if self.user_agent is not None:
+            headers['user-agent'] = self.user_agent
+
+        if http is None:
+            http = httplib2.Http()
+
+        resp, content = http.request(self.token_uri, method='POST', body=body,
+                                     headers=headers)
+        d = _parse_exchange_token_response(content)
+        if resp.status == 200 and 'access_token' in d:
+            access_token = d['access_token']
+            refresh_token = d.get('refresh_token', None)
+            if not refresh_token:
+                logger.info(
+                    'Received token response with no refresh_token. Consider '
+                    "reauthenticating with approval_prompt='force'.")
+            token_expiry = None
+            if 'expires_in' in d:
+                token_expiry = (
+                    datetime.datetime.utcnow() +
+                    datetime.timedelta(seconds=int(d['expires_in'])))
+
+            extracted_id_token = None
+            if 'id_token' in d:
+                extracted_id_token = _extract_id_token(d['id_token'])
+
+            logger.info('Successfully retrieved access token')
+            return OAuth2Credentials(
+                access_token, self.client_id, self.client_secret,
+                refresh_token, token_expiry, self.token_uri, self.user_agent,
+                revoke_uri=self.revoke_uri, id_token=extracted_id_token,
+                token_response=d, scopes=self.scope,
+                token_info_uri=self.token_info_uri)
+        else:
+            logger.info('Failed to retrieve access token: %s', content)
+            if 'error' in d:
+                # you never know what those providers got to say
+                error_msg = (str(d['error']) +
+                             str(d.get('error_description', '')))
+            else:
+                error_msg = 'Invalid response: %s.' % str(resp.status)
+            raise FlowExchangeError(error_msg)
+
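A minimal sketch of the three-legged web-server flow built from this class; the client id,
client secret, scope, and redirect URI are placeholders::

    from oauth2client.client import OAuth2WebServerFlow

    flow = OAuth2WebServerFlow(
        client_id='1234.apps.googleusercontent.com',
        client_secret='client-secret',
        scope='https://www.googleapis.com/auth/userinfo.email',
        redirect_uri='https://example.com/oauth2callback')

    # Step 1: redirect the user to the provider's consent page.
    auth_url = flow.step1_get_authorize_url()

    # Step 2: the provider redirects back with ?code=...; trade it for tokens.
    def finish(code_from_callback):
        return flow.step2_exchange(code_from_callback)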
+
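The corresponding sketch for the device flow, where step 1 returns a DeviceFlowInfo and step 2
is polled with it instead of a code; the flow object is assumed to be an OAuth2WebServerFlow as
in the previous sketch::

    import datetime
    import time
    from oauth2client.client import FlowExchangeError

    def run_device_flow(flow):
        info = flow.step1_get_device_and_user_codes()
        print('Visit %s and enter code %s' % (info.verification_url,
                                              info.user_code))
        while True:
            time.sleep(info.interval or 5)
            if (info.user_code_expiry and
                    datetime.datetime.now() > info.user_code_expiry):
                raise FlowExchangeError('user code expired')
            try:
                return flow.step2_exchange(device_flow_info=info)
            except FlowExchangeError:
                pass  # typically 'authorization_pending'; keep polling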
+@util.positional(2)
+def flow_from_clientsecrets(filename, scope, redirect_uri=None,
+                            message=None, cache=None, login_hint=None,
+                            device_uri=None):
+    """Create a Flow from a clientsecrets file.
+
+    Will create the right kind of Flow based on the contents of the
+    clientsecrets file or will raise InvalidClientSecretsError for unknown
+    types of Flows.
+
+    Args:
+        filename: string, File name of client secrets.
+        scope: string or iterable of strings, scope(s) to request.
+        redirect_uri: string, Either the string 'urn:ietf:wg:oauth:2.0:oob' for
+                      a non-web-based application, or a URI that handles the
+                      callback from the authorization server.
+        message: string, A friendly string to display to the user if the
+                 clientsecrets file is missing or invalid. If message is
+                 provided then sys.exit will be called in the case of an error.
+                 If message is not provided then
+                 clientsecrets.InvalidClientSecretsError will be raised.
+        cache: An optional cache service client that implements get() and set()
+               methods. See clientsecrets.loadfile() for details.
+        login_hint: string, Either an email address or domain. Passing this
+                    hint will either pre-fill the email box on the sign-in form
+                    or select the proper multi-login session, thereby
+                    simplifying the login flow.
+        device_uri: string, URI for device authorization endpoint. For
+                    convenience defaults to Google's endpoints but any
+                    OAuth 2.0 provider can be used.
+
+    Returns:
+        A Flow object.
+
+    Raises:
+        UnknownClientSecretsFlowError: if the file describes an unknown kind of
+                                       Flow.
+        clientsecrets.InvalidClientSecretsError: if the clientsecrets file is
+                                                 invalid.
+    """
+    try:
+        client_type, client_info = clientsecrets.loadfile(filename,
+                                                          cache=cache)
+        if client_type in (clientsecrets.TYPE_WEB,
+                           clientsecrets.TYPE_INSTALLED):
+            constructor_kwargs = {
+                'redirect_uri': redirect_uri,
+                'auth_uri': client_info['auth_uri'],
+                'token_uri': client_info['token_uri'],
+                'login_hint': login_hint,
+            }
+            revoke_uri = client_info.get('revoke_uri')
+            if revoke_uri is not None:
+                constructor_kwargs['revoke_uri'] = revoke_uri
+            if device_uri is not None:
+                constructor_kwargs['device_uri'] = device_uri
+            return OAuth2WebServerFlow(
+                client_info['client_id'], client_info['client_secret'],
+                scope, **constructor_kwargs)
+
+    except clientsecrets.InvalidClientSecretsError:
+        if message:
+            sys.exit(message)
+        else:
+            raise
+    else:
+        raise UnknownClientSecretsFlowError(
+            'This OAuth 2.0 flow is unsupported: %r' % client_type)
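flow_from_clientsecrets() is the usual entry point that ties the pieces above together; a
minimal sketch with a placeholder secrets file, scope, and redirect URI::

    from oauth2client import client

    flow = client.flow_from_clientsecrets(
        'client_secrets.json',                             # placeholder path
        scope='https://www.googleapis.com/auth/userinfo.email',
        redirect_uri='https://example.com/oauth2callback')
    auth_url = flow.step1_get_authorize_url()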
diff --git a/utils/frozen_chromite/third_party/oauth2client/clientsecrets.py b/utils/frozen_chromite/third_party/oauth2client/clientsecrets.py
new file mode 100644
index 0000000..eba1fd9
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/clientsecrets.py
@@ -0,0 +1,173 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for reading OAuth 2.0 client secret files.
+
+A client_secrets.json file contains all the information needed to interact with
+an OAuth 2.0 protected service.
+"""
+
+import json
+import six
+
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+# Properties that make a client_secrets.json file valid.
+TYPE_WEB = 'web'
+TYPE_INSTALLED = 'installed'
+
+VALID_CLIENT = {
+    TYPE_WEB: {
+        'required': [
+            'client_id',
+            'client_secret',
+            'redirect_uris',
+            'auth_uri',
+            'token_uri',
+        ],
+        'string': [
+            'client_id',
+            'client_secret',
+        ],
+    },
+    TYPE_INSTALLED: {
+        'required': [
+            'client_id',
+            'client_secret',
+            'redirect_uris',
+            'auth_uri',
+            'token_uri',
+        ],
+        'string': [
+            'client_id',
+            'client_secret',
+        ],
+    },
+}
+
+
+class Error(Exception):
+    """Base error for this module."""
+
+
+class InvalidClientSecretsError(Error):
+    """Format of ClientSecrets file is invalid."""
+
+
+def _validate_clientsecrets(clientsecrets_dict):
+    """Validate parsed client secrets from a file.
+
+    Args:
+        clientsecrets_dict: dict, a dictionary holding the client secrets.
+
+    Returns:
+        tuple, a string of the client type and the information parsed
+        from the file.
+    """
+    _INVALID_FILE_FORMAT_MSG = (
+        'Invalid file format. See '
+        'https://developers.google.com/api-client-library/'
+        'python/guide/aaa_client_secrets')
+
+    if clientsecrets_dict is None:
+        raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)
+    try:
+        (client_type, client_info), = clientsecrets_dict.items()
+    except (ValueError, AttributeError):
+        raise InvalidClientSecretsError(
+            _INVALID_FILE_FORMAT_MSG + ' '
+            'Expected a JSON object with a single property for a "web" or '
+            '"installed" application')
+
+    if client_type not in VALID_CLIENT:
+        raise InvalidClientSecretsError(
+            'Unknown client type: %s.' % (client_type,))
+
+    for prop_name in VALID_CLIENT[client_type]['required']:
+        if prop_name not in client_info:
+            raise InvalidClientSecretsError(
+                'Missing property "%s" in a client type of "%s".' %
+                (prop_name, client_type))
+    for prop_name in VALID_CLIENT[client_type]['string']:
+        if client_info[prop_name].startswith('[['):
+            raise InvalidClientSecretsError(
+                'Property "%s" is not configured.' % prop_name)
+    return client_type, client_info
+
+
+def load(fp):
+    obj = json.load(fp)
+    return _validate_clientsecrets(obj)
+
+
+def loads(s):
+    obj = json.loads(s)
+    return _validate_clientsecrets(obj)
+
+
+def _loadfile(filename):
+    try:
+        with open(filename, 'r') as fp:
+            obj = json.load(fp)
+    except IOError:
+        raise InvalidClientSecretsError('File not found: "%s"' % filename)
+    return _validate_clientsecrets(obj)
+
+
+def loadfile(filename, cache=None):
+    """Loading of client_secrets JSON file, optionally backed by a cache.
+
+    Typical cache storage would be App Engine memcache service,
+    but you can pass in any other cache client that implements
+    these methods:
+
+    * ``get(key, namespace=ns)``
+    * ``set(key, value, namespace=ns)``
+
+    Usage::
+
+        # without caching
+        client_type, client_info = loadfile('secrets.json')
+        # using App Engine memcache service
+        from google.appengine.api import memcache
+        client_type, client_info = loadfile('secrets.json', cache=memcache)
+
+    Args:
+        filename: string, Path to a client_secrets.json file on a filesystem.
+        cache: An optional cache service client that implements get() and set()
+               methods. If not specified, the file is always loaded from the
+               filesystem.
+
+    Raises:
+        InvalidClientSecretsError: In case of a validation error or some
+                                   I/O failure. Can happen only on cache miss.
+
+    Returns:
+        (client_type, client_info) tuple, as _loadfile() would return.
+        JSON contents are validated only during the first load. Cache hits are
+        not validated.
+    """
+    _SECRET_NAMESPACE = 'oauth2client:secrets#ns'
+
+    if not cache:
+        return _loadfile(filename)
+
+    obj = cache.get(filename, namespace=_SECRET_NAMESPACE)
+    if obj is None:
+        client_type, client_info = _loadfile(filename)
+        obj = {client_type: client_info}
+        cache.set(filename, obj, namespace=_SECRET_NAMESPACE)
+
+    return next(six.iteritems(obj))
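For illustration, the smallest "installed" secrets document that passes the validation above
(every value is a placeholder)::

    from oauth2client import clientsecrets

    SECRETS = '''{
      "installed": {
        "client_id": "1234.apps.googleusercontent.com",
        "client_secret": "client-secret",
        "redirect_uris": ["urn:ietf:wg:oauth:2.0:oob"],
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://accounts.google.com/o/oauth2/token"
      }
    }'''

    client_type, client_info = clientsecrets.loads(SECRETS)
    assert client_type == clientsecrets.TYPE_INSTALLED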
diff --git a/utils/frozen_chromite/third_party/oauth2client/crypt.py b/utils/frozen_chromite/third_party/oauth2client/crypt.py
new file mode 100644
index 0000000..c450c5c
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/crypt.py
@@ -0,0 +1,243 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Crypto-related routines for oauth2client."""
+
+import json
+import logging
+import time
+
+from oauth2client._helpers import _from_bytes
+from oauth2client._helpers import _json_encode
+from oauth2client._helpers import _to_bytes
+from oauth2client._helpers import _urlsafe_b64decode
+from oauth2client._helpers import _urlsafe_b64encode
+
+
+CLOCK_SKEW_SECS = 300  # 5 minutes in seconds
+AUTH_TOKEN_LIFETIME_SECS = 300  # 5 minutes in seconds
+MAX_TOKEN_LIFETIME_SECS = 86400  # 1 day in seconds
+
+logger = logging.getLogger(__name__)
+
+
+class AppIdentityError(Exception):
+    """Error to indicate crypto failure."""
+
+
+def _bad_pkcs12_key_as_pem(*args, **kwargs):
+    raise NotImplementedError('pkcs12_key_as_pem requires OpenSSL.')
+
+
+try:
+    from oauth2client._openssl_crypt import OpenSSLVerifier
+    from oauth2client._openssl_crypt import OpenSSLSigner
+    from oauth2client._openssl_crypt import pkcs12_key_as_pem
+except ImportError:  # pragma: NO COVER
+    OpenSSLVerifier = None
+    OpenSSLSigner = None
+    pkcs12_key_as_pem = _bad_pkcs12_key_as_pem
+
+try:
+    from oauth2client._pycrypto_crypt import PyCryptoVerifier
+    from oauth2client._pycrypto_crypt import PyCryptoSigner
+except ImportError:  # pragma: NO COVER
+    PyCryptoVerifier = None
+    PyCryptoSigner = None
+
+
+if OpenSSLSigner:
+    Signer = OpenSSLSigner
+    Verifier = OpenSSLVerifier
+elif PyCryptoSigner:  # pragma: NO COVER
+    Signer = PyCryptoSigner
+    Verifier = PyCryptoVerifier
+else:  # pragma: NO COVER
+    raise ImportError('No encryption library found. Please install either '
+                      'PyOpenSSL, or PyCrypto 2.6 or later')
+
+
+def make_signed_jwt(signer, payload):
+    """Make a signed JWT.
+
+    See http://self-issued.info/docs/draft-jones-json-web-token.html.
+
+    Args:
+        signer: crypt.Signer, Cryptographic signer.
+        payload: dict, Dictionary of data to convert to JSON and then sign.
+
+    Returns:
+        string, The JWT for the payload.
+    """
+    header = {'typ': 'JWT', 'alg': 'RS256'}
+
+    segments = [
+      _urlsafe_b64encode(_json_encode(header)),
+      _urlsafe_b64encode(_json_encode(payload)),
+    ]
+    signing_input = b'.'.join(segments)
+
+    signature = signer.sign(signing_input)
+    segments.append(_urlsafe_b64encode(signature))
+
+    logger.debug(str(segments))
+
+    return b'.'.join(segments)
+
+
+def _verify_signature(message, signature, certs):
+    """Verifies signed content using a list of certificates.
+
+    Args:
+        message: string or bytes, The message to verify.
+        signature: string or bytes, The signature on the message.
+        certs: iterable, certificates in PEM format.
+
+    Raises:
+        AppIdentityError: If none of the certificates can verify the message
+                          against the signature.
+    """
+    for pem in certs:
+        verifier = Verifier.from_string(pem, is_x509_cert=True)
+        if verifier.verify(message, signature):
+            return
+
+    # If we have not returned, no certificate confirms the signature.
+    raise AppIdentityError('Invalid token signature')
+
+
+def _check_audience(payload_dict, audience):
+    """Checks audience field from a JWT payload.
+
+    Does nothing if the passed in ``audience`` is null.
+
+    Args:
+        payload_dict: dict, A dictionary containing a JWT payload.
+        audience: string or NoneType, an audience to check for in
+                  the JWT payload.
+
+    Raises:
+        AppIdentityError: If there is no ``'aud'`` field in the payload
+                          dictionary but there is an ``audience`` to check.
+        AppIdentityError: If the ``'aud'`` field in the payload dictionary
+                          does not match the ``audience``.
+    """
+    if audience is None:
+        return
+
+    audience_in_payload = payload_dict.get('aud')
+    if audience_in_payload is None:
+        raise AppIdentityError('No aud field in token: %s' %
+                               (payload_dict,))
+    if audience_in_payload != audience:
+        raise AppIdentityError('Wrong recipient, %s != %s: %s' %
+                               (audience_in_payload, audience, payload_dict))
+
+
+def _verify_time_range(payload_dict):
+    """Verifies the issued at and expiration from a JWT payload.
+
+    Makes sure the current time (in UTC) falls between the issued at and
+    expiration for the JWT (with some skew allowed for via
+    ``CLOCK_SKEW_SECS``).
+
+    Args:
+        payload_dict: dict, A dictionary containing a JWT payload.
+
+    Raises:
+        AppIdentityError: If there is no ``'iat'`` field in the payload
+                          dictionary.
+        AppIdentityError: If there is no ``'exp'`` field in the payload
+                          dictionary.
+        AppIdentityError: If the JWT expiration is too far in the future (i.e.
+                          if the expiration would imply a token lifetime
+                          longer than what is allowed.)
+        AppIdentityError: If the token appears to have been issued in the
+                          future (up to clock skew).
+        AppIdentityError: If the token appears to have expired in the past
+                          (up to clock skew).
+    """
+    # Get the current time to use throughout.
+    now = int(time.time())
+
+    # Make sure issued at and expiration are in the payload.
+    issued_at = payload_dict.get('iat')
+    if issued_at is None:
+        raise AppIdentityError('No iat field in token: %s' % (payload_dict,))
+    expiration = payload_dict.get('exp')
+    if expiration is None:
+        raise AppIdentityError('No exp field in token: %s' % (payload_dict,))
+
+    # Make sure the expiration gives an acceptable token lifetime.
+    if expiration >= now + MAX_TOKEN_LIFETIME_SECS:
+        raise AppIdentityError('exp field too far in future: %s' %
+                               (payload_dict,))
+
+    # Make sure (up to clock skew) that the token wasn't issued in the future.
+    earliest = issued_at - CLOCK_SKEW_SECS
+    if now < earliest:
+        raise AppIdentityError('Token used too early, %d < %d: %s' %
+                               (now, earliest, payload_dict))
+    # Make sure (up to clock skew) that the token isn't already expired.
+    latest = expiration + CLOCK_SKEW_SECS
+    if now > latest:
+        raise AppIdentityError('Token used too late, %d > %d: %s' %
+                               (now, latest, payload_dict))
+
+
+def verify_signed_jwt_with_certs(jwt, certs, audience=None):
+    """Verify a JWT against public certs.
+
+    See http://self-issued.info/docs/draft-jones-json-web-token.html.
+
+    Args:
+        jwt: string, A JWT.
+        certs: dict, Dictionary where the values are public keys in PEM format.
+        audience: string, The audience, 'aud', that this JWT should contain. If
+                  None then the JWT's 'aud' parameter is not verified.
+
+    Returns:
+        dict, The deserialized JSON payload in the JWT.
+
+    Raises:
+        AppIdentityError: if any checks are failed.
+    """
+    jwt = _to_bytes(jwt)
+
+    if jwt.count(b'.') != 2:
+        raise AppIdentityError(
+            'Wrong number of segments in token: %s' % (jwt,))
+
+    header, payload, signature = jwt.split(b'.')
+    message_to_sign = header + b'.' + payload
+    signature = _urlsafe_b64decode(signature)
+
+    # Parse token.
+    payload_bytes = _urlsafe_b64decode(payload)
+    try:
+        payload_dict = json.loads(_from_bytes(payload_bytes))
+    except:
+        raise AppIdentityError('Can\'t parse token: %s' % (payload_bytes,))
+
+    # Verify that the signature matches the message.
+    _verify_signature(message_to_sign, signature, certs.values())
+
+    # Verify the issued-at and expiration times in the payload.
+    _verify_time_range(payload_dict)
+
+    # Check audience.
+    _check_audience(payload_dict, audience)
+
+    return payload_dict
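A round-trip sketch using the signer/verifier selected above; the PEM private key and the cert
map are assumed inputs, and the payload carries the iat/exp fields the time-range check
requires::

    import time
    from oauth2client import crypt

    def sign_and_verify(pem_private_key, certs_by_key_id):
        signer = crypt.Signer.from_string(pem_private_key)
        now = int(time.time())
        jwt = crypt.make_signed_jwt(signer, {
            'iss': 'signer@example.com',    # placeholder issuer
            'aud': 'https://example.com',   # placeholder audience
            'iat': now,
            'exp': now + 300,
        })
        # Raises AppIdentityError if no certificate verifies the signature.
        return crypt.verify_signed_jwt_with_certs(
            jwt, certs_by_key_id, audience='https://example.com')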
diff --git a/utils/frozen_chromite/third_party/oauth2client/file.py b/utils/frozen_chromite/third_party/oauth2client/file.py
new file mode 100644
index 0000000..d0dd174
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/file.py
@@ -0,0 +1,122 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for OAuth.
+
+Utilities for making it easier to work with OAuth 2.0
+credentials.
+"""
+
+import os
+import threading
+
+from oauth2client.client import Credentials
+from oauth2client.client import Storage as BaseStorage
+
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+
+class CredentialsFileSymbolicLinkError(Exception):
+    """Credentials files must not be symbolic links."""
+
+
+class Storage(BaseStorage):
+    """Store and retrieve a single credential to and from a file."""
+
+    def __init__(self, filename):
+        self._filename = filename
+        self._lock = threading.Lock()
+
+    def _validate_file(self):
+        if os.path.islink(self._filename):
+            raise CredentialsFileSymbolicLinkError(
+                'File: %s is a symbolic link.' % self._filename)
+
+    def acquire_lock(self):
+        """Acquires any lock necessary to access this Storage.
+
+        This lock is not reentrant.
+        """
+        self._lock.acquire()
+
+    def release_lock(self):
+        """Release the Storage lock.
+
+        Trying to release a lock that isn't held will result in a
+        RuntimeError.
+        """
+        self._lock.release()
+
+    def locked_get(self):
+        """Retrieve Credential from file.
+
+        Returns:
+            oauth2client.client.Credentials
+
+        Raises:
+            CredentialsFileSymbolicLinkError if the file is a symbolic link.
+        """
+        credentials = None
+        self._validate_file()
+        try:
+            f = open(self._filename, 'rb')
+            content = f.read()
+            f.close()
+        except IOError:
+            return credentials
+
+        try:
+            credentials = Credentials.new_from_json(content)
+            credentials.set_store(self)
+        except ValueError:
+            pass
+
+        return credentials
+
+    def _create_file_if_needed(self):
+        """Create an empty file if necessary.
+
+        This method will not initialize the file. Instead it implements a
+        simple version of "touch" to ensure the file has been created.
+        """
+        if not os.path.exists(self._filename):
+            old_umask = os.umask(0o177)
+            try:
+                open(self._filename, 'a+b').close()
+            finally:
+                os.umask(old_umask)
+
+    def locked_put(self, credentials):
+        """Write Credentials to file.
+
+        Args:
+            credentials: Credentials, the credentials to store.
+
+        Raises:
+            CredentialsFileSymbolicLinkError if the file is a symbolic link.
+        """
+        self._create_file_if_needed()
+        self._validate_file()
+        f = open(self._filename, 'w')
+        f.write(credentials.to_json())
+        f.close()
+
+    def locked_delete(self):
+        """Delete Credentials file.
+
+        Args:
+            credentials: Credentials, the credentials to store.
+        """
+        os.unlink(self._filename)
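A minimal sketch of the typical load-or-refresh pattern with this Storage; the filename is a
placeholder and must not be a symlink::

    from oauth2client.file import Storage

    storage = Storage('oauth-credentials.json')   # placeholder path
    credentials = storage.get()                   # None if nothing stored yet
    if credentials is None or credentials.invalid:
        # Run a flow (see client.py above), then persist the result:
        # credentials = flow.step2_exchange(code)
        # storage.put(credentials)
        pass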
diff --git a/utils/frozen_chromite/third_party/oauth2client/gce.py b/utils/frozen_chromite/third_party/oauth2client/gce.py
new file mode 100644
index 0000000..77b08f1
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/gce.py
@@ -0,0 +1,111 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for Google Compute Engine
+
+Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
+"""
+
+import json
+import logging
+from six.moves import urllib
+
+from oauth2client._helpers import _from_bytes
+from oauth2client import util
+from oauth2client.client import HttpAccessTokenRefreshError
+from oauth2client.client import AssertionCredentials
+
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+logger = logging.getLogger(__name__)
+
+# URI Template for the endpoint that returns access_tokens.
+META = ('http://metadata.google.internal/0.1/meta-data/service-accounts/'
+        'default/acquire{?scope}')
+
+
+class AppAssertionCredentials(AssertionCredentials):
+    """Credentials object for Compute Engine Assertion Grants
+
+    This object will allow a Compute Engine instance to identify itself to
+    Google and other OAuth 2.0 servers that can verify assertions. It can be
+    used for the purpose of accessing data stored under an account assigned to
+    the Compute Engine instance itself.
+
+    This credential does not require a flow to instantiate because it
+    represents a two legged flow, and therefore has all of the required
+    information to generate and refresh its own access tokens.
+    """
+
+    @util.positional(2)
+    def __init__(self, scope, **kwargs):
+        """Constructor for AppAssertionCredentials
+
+        Args:
+            scope: string or iterable of strings, scope(s) of the credentials
+                   being requested.
+        """
+        self.scope = util.scopes_to_string(scope)
+        self.kwargs = kwargs
+
+        # Assertion type is no longer used, but still in the
+        # parent class signature.
+        super(AppAssertionCredentials, self).__init__(None)
+
+    @classmethod
+    def from_json(cls, json_data):
+        data = json.loads(_from_bytes(json_data))
+        return AppAssertionCredentials(data['scope'])
+
+    def _refresh(self, http_request):
+        """Refreshes the access_token.
+
+        Skip all the storage hoops and just refresh using the API.
+
+        Args:
+            http_request: callable, a callable that matches the method
+                          signature of httplib2.Http.request, used to make
+                          the refresh request.
+
+        Raises:
+            HttpAccessTokenRefreshError: When the refresh fails.
+        """
+        query = '?scope=%s' % urllib.parse.quote(self.scope, '')
+        uri = META.replace('{?scope}', query)
+        response, content = http_request(uri)
+        content = _from_bytes(content)
+        if response.status == 200:
+            try:
+                d = json.loads(content)
+            except Exception as e:
+                raise HttpAccessTokenRefreshError(str(e),
+                                                  status=response.status)
+            self.access_token = d['accessToken']
+        else:
+            if response.status == 404:
+                content += (' This can occur if a VM was created'
+                            ' with no service account or scopes.')
+            raise HttpAccessTokenRefreshError(content, status=response.status)
+
+    @property
+    def serialization_data(self):
+        raise NotImplementedError(
+            'Cannot serialize credentials for GCE service accounts.')
+
+    def create_scoped_required(self):
+        return not self.scope
+
+    def create_scoped(self, scopes):
+        return AppAssertionCredentials(scopes, **self.kwargs)
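A minimal sketch for a Compute Engine VM whose default service account already carries the
requested scope (the scope shown is a placeholder)::

    import httplib2
    from oauth2client.gce import AppAssertionCredentials

    credentials = AppAssertionCredentials(
        'https://www.googleapis.com/auth/devstorage.read_only')
    # Access tokens are fetched from the VM metadata server on refresh.
    http = credentials.authorize(httplib2.Http())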
diff --git a/utils/frozen_chromite/third_party/oauth2client/keyring_storage.py b/utils/frozen_chromite/third_party/oauth2client/keyring_storage.py
new file mode 100644
index 0000000..0a4c285
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/keyring_storage.py
@@ -0,0 +1,114 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A keyring based Storage.
+
+A Storage for Credentials that uses the keyring module.
+"""
+
+import threading
+
+import keyring
+
+from oauth2client.client import Credentials
+from oauth2client.client import Storage as BaseStorage
+
+
+__author__ = '[email protected] (Joe Gregorio)'
+
+
+class Storage(BaseStorage):
+    """Store and retrieve a single credential to and from the keyring.
+
+    To use this module you must have the keyring module installed. See
+    <http://pypi.python.org/pypi/keyring/>. This is an optional module and is
+    not installed with oauth2client by default because it does not work on all
+    the platforms that oauth2client supports, such as Google App Engine.
+
+    The keyring module <http://pypi.python.org/pypi/keyring/> is a
+    cross-platform library for access the keyring capabilities of the local
+    system. The user will be prompted for their keyring password when this
+    module is used, and the manner in which the user is prompted will vary per
+    platform.
+
+    Usage::
+
+        from oauth2client.keyring_storage import Storage
+
+        s = Storage('name_of_application', 'user1')
+        credentials = s.get()
+
+    """
+
+    def __init__(self, service_name, user_name):
+        """Constructor.
+
+        Args:
+            service_name: string, The name of the service under which the
+                          credentials are stored.
+            user_name: string, The name of the user to store credentials for.
+        """
+        self._service_name = service_name
+        self._user_name = user_name
+        self._lock = threading.Lock()
+
+    def acquire_lock(self):
+        """Acquires any lock necessary to access this Storage.
+
+        This lock is not reentrant.
+        """
+        self._lock.acquire()
+
+    def release_lock(self):
+        """Release the Storage lock.
+
+        Trying to release a lock that isn't held will result in a
+        RuntimeError.
+        """
+        self._lock.release()
+
+    def locked_get(self):
+        """Retrieve Credential from file.
+
+        Returns:
+            oauth2client.client.Credentials
+        """
+        credentials = None
+        content = keyring.get_password(self._service_name, self._user_name)
+
+        if content is not None:
+            try:
+                credentials = Credentials.new_from_json(content)
+                credentials.set_store(self)
+            except ValueError:
+                pass
+
+        return credentials
+
+    def locked_put(self, credentials):
+        """Write Credentials to file.
+
+        Args:
+            credentials: Credentials, the credentials to store.
+        """
+        keyring.set_password(self._service_name, self._user_name,
+                             credentials.to_json())
+
+    def locked_delete(self):
+        """Delete Credentials file.
+
+        Args:
+            credentials: Credentials, the credentials to store.
+        """
+        keyring.set_password(self._service_name, self._user_name, '')
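A put/get round trip with this keyring-backed Storage; the service and user names are
placeholders and the optional keyring package must be installed::

    from oauth2client.keyring_storage import Storage

    def save_and_reload(credentials):
        storage = Storage('my_application', 'user1')   # placeholder names
        storage.put(credentials)    # serialized JSON goes into the OS keyring
        return storage.get()        # None if nothing stored or JSON is invalid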
diff --git a/utils/frozen_chromite/third_party/oauth2client/locked_file.py b/utils/frozen_chromite/third_party/oauth2client/locked_file.py
new file mode 100644
index 0000000..1028a7e
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/locked_file.py
@@ -0,0 +1,387 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Locked file interface that should work on Unix and Windows pythons.
+
+This module first tries to use fcntl locking to ensure serialized access
+to a file, then falls back on a lock file if that is unavailable.
+
+Usage::
+
+    f = LockedFile('filename', 'r+b', 'rb')
+    f.open_and_lock()
+    if f.is_locked():
+      print('Acquired filename with r+b mode')
+      f.file_handle().write('locked data')
+    else:
+      print('Acquired filename with rb mode')
+    f.unlock_and_close()
+
+"""
+
+from __future__ import print_function
+
+import errno
+import logging
+import os
+import time
+
+from oauth2client import util
+
+
+__author__ = '[email protected] (David T McWherter)'
+
+logger = logging.getLogger(__name__)
+
+
+class CredentialsFileSymbolicLinkError(Exception):
+    """Credentials files must not be symbolic links."""
+
+
+class AlreadyLockedException(Exception):
+    """Trying to lock a file that has already been locked by the LockedFile."""
+    pass
+
+
+def validate_file(filename):
+    if os.path.islink(filename):
+        raise CredentialsFileSymbolicLinkError(
+            'File: %s is a symbolic link.' % filename)
+
+
+class _Opener(object):
+    """Base class for different locking primitives."""
+
+    def __init__(self, filename, mode, fallback_mode):
+        """Create an Opener.
+
+        Args:
+            filename: string, The pathname of the file.
+            mode: string, The preferred mode to access the file with.
+            fallback_mode: string, The mode to use if locking fails.
+        """
+        self._locked = False
+        self._filename = filename
+        self._mode = mode
+        self._fallback_mode = fallback_mode
+        self._fh = None
+        self._lock_fd = None
+
+    def is_locked(self):
+        """Was the file locked."""
+        return self._locked
+
+    def file_handle(self):
+        """The file handle to the file. Valid only after opened."""
+        return self._fh
+
+    def filename(self):
+        """The filename that is being locked."""
+        return self._filename
+
+    def open_and_lock(self, timeout, delay):
+        """Open the file and lock it.
+
+        Args:
+            timeout: float, How long to try to lock for.
+            delay: float, How long to wait between retries.
+        """
+        pass
+
+    def unlock_and_close(self):
+        """Unlock and close the file."""
+        pass
+
+
+class _PosixOpener(_Opener):
+    """Lock files using Posix advisory lock files."""
+
+    def open_and_lock(self, timeout, delay):
+        """Open the file and lock it.
+
+        Tries to create a .lock file next to the file we're trying to open.
+
+        Args:
+            timeout: float, How long to try to lock for.
+            delay: float, How long to wait between retries.
+
+        Raises:
+            AlreadyLockedException: if the lock is already acquired.
+            IOError: if the open fails.
+            CredentialsFileSymbolicLinkError: if the file is a symbolic link.
+        """
+        if self._locked:
+            raise AlreadyLockedException('File %s is already locked' %
+                                         self._filename)
+        self._locked = False
+
+        validate_file(self._filename)
+        try:
+            self._fh = open(self._filename, self._mode)
+        except IOError as e:
+            # If we can't access with _mode, try _fallback_mode and don't lock.
+            if e.errno == errno.EACCES:
+                self._fh = open(self._filename, self._fallback_mode)
+                return
+
+        lock_filename = self._posix_lockfile(self._filename)
+        start_time = time.time()
+        while True:
+            try:
+                self._lock_fd = os.open(lock_filename,
+                                        os.O_CREAT | os.O_EXCL | os.O_RDWR)
+                self._locked = True
+                break
+
+            except OSError as e:
+                if e.errno != errno.EEXIST:
+                    raise
+                if (time.time() - start_time) >= timeout:
+                    logger.warn('Could not acquire lock %s in %s seconds',
+                                lock_filename, timeout)
+                    # Close the file and open in fallback_mode.
+                    if self._fh:
+                        self._fh.close()
+                    self._fh = open(self._filename, self._fallback_mode)
+                    return
+                time.sleep(delay)
+
+    def unlock_and_close(self):
+        """Unlock a file by removing the .lock file, and close the handle."""
+        if self._locked:
+            lock_filename = self._posix_lockfile(self._filename)
+            os.close(self._lock_fd)
+            os.unlink(lock_filename)
+            self._locked = False
+            self._lock_fd = None
+        if self._fh:
+            self._fh.close()
+
+    def _posix_lockfile(self, filename):
+        """The name of the lock file to use for posix locking."""
+        return '%s.lock' % filename
+
+
+try:
+    import fcntl
+
+    class _FcntlOpener(_Opener):
+        """Open, lock, and unlock a file using fcntl.lockf."""
+
+        def open_and_lock(self, timeout, delay):
+            """Open the file and lock it.
+
+            Args:
+                timeout: float, How long to try to lock for.
+                delay: float, How long to wait between retries
+
+            Raises:
+                AlreadyLockedException: if the lock is already acquired.
+                IOError: if the open fails.
+                CredentialsFileSymbolicLinkError: if the file is a symbolic
+                                                  link.
+            """
+            if self._locked:
+                raise AlreadyLockedException('File %s is already locked' %
+                                             self._filename)
+            start_time = time.time()
+
+            validate_file(self._filename)
+            try:
+                self._fh = open(self._filename, self._mode)
+            except IOError as e:
+                # If we can't access with _mode, try _fallback_mode and
+                # don't lock.
+                if e.errno in (errno.EPERM, errno.EACCES):
+                    self._fh = open(self._filename, self._fallback_mode)
+                    return
+
+            # We opened in _mode, try to lock the file.
+            while True:
+                try:
+                    fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)
+                    self._locked = True
+                    return
+                except IOError as e:
+                    # If not retrying, then just pass on the error.
+                    if timeout == 0:
+                        raise
+                    if e.errno != errno.EACCES:
+                        raise
+                    # We could not acquire the lock. Try again.
+                    if (time.time() - start_time) >= timeout:
+                        logger.warn('Could not lock %s in %s seconds',
+                                    self._filename, timeout)
+                        if self._fh:
+                            self._fh.close()
+                        self._fh = open(self._filename, self._fallback_mode)
+                        return
+                    time.sleep(delay)
+
+        def unlock_and_close(self):
+            """Close and unlock the file using the fcntl.lockf primitive."""
+            if self._locked:
+                fcntl.lockf(self._fh.fileno(), fcntl.LOCK_UN)
+            self._locked = False
+            if self._fh:
+                self._fh.close()
+except ImportError:
+    _FcntlOpener = None
+
+
+try:
+    import pywintypes
+    import win32con
+    import win32file
+
+    class _Win32Opener(_Opener):
+        """Open, lock, and unlock a file using windows primitives."""
+
+        # Error #33:
+        #  'The process cannot access the file because another process'
+        FILE_IN_USE_ERROR = 33
+
+        # Error #158:
+        #  'The segment is already unlocked.'
+        FILE_ALREADY_UNLOCKED_ERROR = 158
+
+        def open_and_lock(self, timeout, delay):
+            """Open the file and lock it.
+
+            Args:
+                timeout: float, How long to try to lock for.
+                delay: float, How long to wait between retries
+
+            Raises:
+                AlreadyLockedException: if the lock is already acquired.
+                IOError: if the open fails.
+                CredentialsFileSymbolicLinkError: if the file is a symbolic
+                                                  link.
+            """
+            if self._locked:
+                raise AlreadyLockedException('File %s is already locked' %
+                                             self._filename)
+            start_time = time.time()
+
+            validate_file(self._filename)
+            try:
+                self._fh = open(self._filename, self._mode)
+            except IOError as e:
+                # If we can't access with _mode, try _fallback_mode
+                # and don't lock.
+                if e.errno == errno.EACCES:
+                    self._fh = open(self._filename, self._fallback_mode)
+                    return
+
+            # We opened in _mode, try to lock the file.
+            while True:
+                try:
+                    hfile = win32file._get_osfhandle(self._fh.fileno())
+                    win32file.LockFileEx(
+                        hfile,
+                        (win32con.LOCKFILE_FAIL_IMMEDIATELY |
+                         win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,
+                        pywintypes.OVERLAPPED())
+                    self._locked = True
+                    return
+                except pywintypes.error as e:
+                    if timeout == 0:
+                        raise
+
+                    # If the error is not that the file is already
+                    # in use, raise.
+                    if e[0] != _Win32Opener.FILE_IN_USE_ERROR:
+                        raise
+
+                    # We could not acquire the lock. Try again.
+                    if (time.time() - start_time) >= timeout:
+                        logger.warn('Could not lock %s in %s seconds' % (
+                            self._filename, timeout))
+                        if self._fh:
+                            self._fh.close()
+                        self._fh = open(self._filename, self._fallback_mode)
+                        return
+                    time.sleep(delay)
+
+        def unlock_and_close(self):
+            """Close and unlock the file using the win32 primitive."""
+            if self._locked:
+                try:
+                    hfile = win32file._get_osfhandle(self._fh.fileno())
+                    win32file.UnlockFileEx(hfile, 0, -0x10000,
+                                           pywintypes.OVERLAPPED())
+                except pywintypes.error as e:
+                    if e[0] != _Win32Opener.FILE_ALREADY_UNLOCKED_ERROR:
+                        raise
+            self._locked = False
+            if self._fh:
+                self._fh.close()
+except ImportError:
+    _Win32Opener = None
+
+
+class LockedFile(object):
+    """Represent a file that has exclusive access."""
+
+    @util.positional(4)
+    def __init__(self, filename, mode, fallback_mode, use_native_locking=True):
+        """Construct a LockedFile.
+
+        Args:
+            filename: string, The path of the file to open.
+            mode: string, The mode to try to open the file with.
+            fallback_mode: string, The mode to use if locking fails.
+            use_native_locking: bool, Whether or not fcntl/win32 locking is
+                                used.
+        """
+        opener = None
+        if not opener and use_native_locking:
+            if _Win32Opener:
+                opener = _Win32Opener(filename, mode, fallback_mode)
+            if _FcntlOpener:
+                opener = _FcntlOpener(filename, mode, fallback_mode)
+
+        if not opener:
+            opener = _PosixOpener(filename, mode, fallback_mode)
+
+        self._opener = opener
+
+    def filename(self):
+        """Return the filename we were constructed with."""
+        return self._opener._filename
+
+    def file_handle(self):
+        """Return the file_handle to the opened file."""
+        return self._opener.file_handle()
+
+    def is_locked(self):
+        """Return whether we successfully locked the file."""
+        return self._opener.is_locked()
+
+    def open_and_lock(self, timeout=0, delay=0.05):
+        """Open the file, trying to lock it.
+
+        Args:
+            timeout: float, The number of seconds to try to acquire the lock.
+            delay: float, The number of seconds to wait between retry attempts.
+
+        Raises:
+            AlreadyLockedException: if the lock is already acquired.
+            IOError: if the open fails.
+        """
+        self._opener.open_and_lock(timeout, delay)
+
+    def unlock_and_close(self):
+        """Unlock and close a file."""
+        self._opener.unlock_and_close()
diff --git a/utils/frozen_chromite/third_party/oauth2client/multistore_file.py b/utils/frozen_chromite/third_party/oauth2client/multistore_file.py
new file mode 100644
index 0000000..5a12797
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/multistore_file.py
@@ -0,0 +1,484 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Multi-credential file store with lock support.
+
+This module implements a JSON credential store where multiple
+credentials can be stored in one file. That file supports locking
+both in a single process and across processes.
+
+The credentials themselves are keyed off of:
+
+* client_id
+* user_agent
+* scope
+
+The format of the stored data is like so::
+
+    {
+      'file_version': 1,
+      'data': [
+          {
+              'key': {
+                  'clientId': '<client id>',
+                  'userAgent': '<user agent>',
+                  'scope': '<scope>'
+              },
+              'credential': {
+                  # JSON serialized Credentials.
+              }
+          }
+      ]
+    }
+
+"""
+
+import errno
+import json
+import logging
+import os
+import threading
+
+from oauth2client.client import Credentials
+from oauth2client.client import Storage as BaseStorage
+from oauth2client import util
+from oauth2client.locked_file import LockedFile
+
+
+__author__ = '[email protected] (Joe Beda)'
+
+logger = logging.getLogger(__name__)
+
+# A dict from 'filename'->_MultiStore instances
+_multistores = {}
+_multistores_lock = threading.Lock()
+
+
+class Error(Exception):
+    """Base error for this module."""
+
+
+class NewerCredentialStoreError(Error):
+    """The credential store is a newer version than supported."""
+
+
[email protected](4)
+def get_credential_storage(filename, client_id, user_agent, scope,
+                           warn_on_readonly=True):
+    """Get a Storage instance for a credential.
+
+    Args:
+        filename: The JSON file storing a set of credentials
+        client_id: The client_id for the credential
+        user_agent: The user agent for the credential
+        scope: string or iterable of strings, Scope(s) being requested
+        warn_on_readonly: if True, log a warning if the store is readonly
+
+    Returns:
+        An object derived from client.Storage for getting/setting the
+        credential.
+    """
+    # Recreate the legacy key with these specific parameters
+    key = {'clientId': client_id, 'userAgent': user_agent,
+           'scope': util.scopes_to_string(scope)}
+    return get_credential_storage_custom_key(
+      filename, key, warn_on_readonly=warn_on_readonly)
+
+
[email protected](2)
+def get_credential_storage_custom_string_key(filename, key_string,
+                                             warn_on_readonly=True):
+    """Get a Storage instance for a credential using a single string as a key.
+
+    Allows you to provide a string as a custom key that will be used for
+    credential storage and retrieval.
+
+    Args:
+        filename: The JSON file storing a set of credentials
+        key_string: A string to use as the key for storing this credential.
+        warn_on_readonly: if True, log a warning if the store is readonly
+
+    Returns:
+        An object derived from client.Storage for getting/setting the
+        credential.
+    """
+    # Create a key dictionary that can be used
+    key_dict = {'key': key_string}
+    return get_credential_storage_custom_key(
+      filename, key_dict, warn_on_readonly=warn_on_readonly)
+
+
[email protected](2)
+def get_credential_storage_custom_key(filename, key_dict,
+                                      warn_on_readonly=True):
+    """Get a Storage instance for a credential using a dictionary as a key.
+
+    Allows you to provide a dictionary as a custom key that will be used for
+    credential storage and retrieval.
+
+    Args:
+        filename: The JSON file storing a set of credentials
+        key_dict: A dictionary to use as the key for storing this credential.
+                  There is no ordering of the keys in the dictionary. Logically
+                  equivalent dictionaries will produce equivalent storage keys.
+        warn_on_readonly: if True, log a warning if the store is readonly
+
+    Returns:
+        An object derived from client.Storage for getting/setting the
+        credential.
+    """
+    multistore = _get_multistore(filename, warn_on_readonly=warn_on_readonly)
+    key = util.dict_to_tuple_key(key_dict)
+    return multistore._get_storage(key)
+
+
[email protected](1)
+def get_all_credential_keys(filename, warn_on_readonly=True):
+    """Gets all the registered credential keys in the given Multistore.
+
+    Args:
+        filename: The JSON file storing a set of credentials
+        warn_on_readonly: if True, log a warning if the store is readonly
+
+    Returns:
+        A list of the credential keys present in the file.  They are returned
+        as dictionaries that can be passed into
+        get_credential_storage_custom_key to get the actual credentials.
+    """
+    multistore = _get_multistore(filename, warn_on_readonly=warn_on_readonly)
+    multistore._lock()
+    try:
+        return multistore._get_all_credential_keys()
+    finally:
+        multistore._unlock()
+
+
[email protected](1)
+def _get_multistore(filename, warn_on_readonly=True):
+    """A helper method to initialize the multistore with proper locking.
+
+    Args:
+        filename: The JSON file storing a set of credentials
+        warn_on_readonly: if True, log a warning if the store is readonly
+
+    Returns:
+        A multistore object
+    """
+    filename = os.path.expanduser(filename)
+    _multistores_lock.acquire()
+    try:
+        multistore = _multistores.setdefault(
+            filename, _MultiStore(filename, warn_on_readonly=warn_on_readonly))
+    finally:
+        _multistores_lock.release()
+    return multistore
+
+
+class _MultiStore(object):
+    """A file backed store for multiple credentials."""
+
+    @util.positional(2)
+    def __init__(self, filename, warn_on_readonly=True):
+        """Initialize the class.
+
+        This will create the file if necessary.
+        """
+        self._file = LockedFile(filename, 'r+', 'r')
+        self._thread_lock = threading.Lock()
+        self._read_only = False
+        self._warn_on_readonly = warn_on_readonly
+
+        self._create_file_if_needed()
+
+        # Cache of deserialized store. This is only valid after the
+        # _MultiStore is locked or _refresh_data_cache is called. This is
+        # of the form of:
+        #
+        # ((key, value), (key, value)...) -> OAuth2Credential
+        #
+        # If this is None, then the store hasn't been read yet.
+        self._data = None
+
+    class _Storage(BaseStorage):
+        """A Storage object that can read/write a single credential."""
+
+        def __init__(self, multistore, key):
+            self._multistore = multistore
+            self._key = key
+
+        def acquire_lock(self):
+            """Acquires any lock necessary to access this Storage.
+
+            This lock is not reentrant.
+            """
+            self._multistore._lock()
+
+        def release_lock(self):
+            """Release the Storage lock.
+
+            Trying to release a lock that isn't held will result in a
+            RuntimeError.
+            """
+            self._multistore._unlock()
+
+        def locked_get(self):
+            """Retrieve credential.
+
+            The Storage lock must be held when this is called.
+
+            Returns:
+                oauth2client.client.Credentials
+            """
+            credential = self._multistore._get_credential(self._key)
+            if credential:
+                credential.set_store(self)
+            return credential
+
+        def locked_put(self, credentials):
+            """Write a credential.
+
+            The Storage lock must be held when this is called.
+
+            Args:
+                credentials: Credentials, the credentials to store.
+            """
+            self._multistore._update_credential(self._key, credentials)
+
+        def locked_delete(self):
+            """Delete a credential.
+
+            The Storage lock must be held when this is called.
+
+            Args:
+                credentials: Credentials, the credentials to store.
+            """
+            self._multistore._delete_credential(self._key)
+
+    def _create_file_if_needed(self):
+        """Create an empty file if necessary.
+
+        This method will not initialize the file. Instead it implements a
+        simple version of "touch" to ensure the file has been created.
+        """
+        if not os.path.exists(self._file.filename()):
+            old_umask = os.umask(0o177)
+            try:
+                open(self._file.filename(), 'a+b').close()
+            finally:
+                os.umask(old_umask)
+
+    def _lock(self):
+        """Lock the entire multistore."""
+        self._thread_lock.acquire()
+        try:
+            self._file.open_and_lock()
+        except IOError as e:
+            if e.errno == errno.ENOSYS:
+                logger.warn('File system does not support locking the '
+                            'credentials file.')
+            elif e.errno == errno.ENOLCK:
+                logger.warn('File system is out of resources for writing the '
+                            'credentials file (is your disk full?).')
+            elif e.errno == errno.EDEADLK:
+                logger.warn('Lock contention on multistore file, opening '
+                            'in read-only mode.')
+            else:
+                raise
+        if not self._file.is_locked():
+            self._read_only = True
+            if self._warn_on_readonly:
+                logger.warn('The credentials file (%s) is not writable. '
+                            'Opening in read-only mode. Any refreshed '
+                            'credentials will only be '
+                            'valid for this run.', self._file.filename())
+        if os.path.getsize(self._file.filename()) == 0:
+            logger.debug('Initializing empty multistore file')
+            # The multistore is empty so write out an empty file.
+            self._data = {}
+            self._write()
+        elif not self._read_only or self._data is None:
+            # Only refresh the data if we are read/write or we haven't
+            # cached the data yet. If we are readonly, we assume it isn't
+            # changing out from under us and that we only have to read it
+            # once. This prevents us from whacking any new access keys that
+            # we have cached in memory but were unable to write out.
+            self._refresh_data_cache()
+
+    def _unlock(self):
+        """Release the lock on the multistore."""
+        self._file.unlock_and_close()
+        self._thread_lock.release()
+
+    def _locked_json_read(self):
+        """Get the raw content of the multistore file.
+
+        The multistore must be locked when this is called.
+
+        Returns:
+            The contents of the multistore decoded as JSON.
+        """
+        assert self._thread_lock.locked()
+        self._file.file_handle().seek(0)
+        return json.load(self._file.file_handle())
+
+    def _locked_json_write(self, data):
+        """Write a JSON serializable data structure to the multistore.
+
+        The multistore must be locked when this is called.
+
+        Args:
+            data: The data to be serialized and written.
+        """
+        assert self._thread_lock.locked()
+        if self._read_only:
+            return
+        self._file.file_handle().seek(0)
+        json.dump(data, self._file.file_handle(),
+                  sort_keys=True, indent=2, separators=(',', ': '))
+        self._file.file_handle().truncate()
+
+    def _refresh_data_cache(self):
+        """Refresh the contents of the multistore.
+
+        The multistore must be locked when this is called.
+
+        Raises:
+            NewerCredentialStoreError: Raised when a newer client has written
+            the store.
+        """
+        self._data = {}
+        try:
+            raw_data = self._locked_json_read()
+        except Exception:
+            logger.warn('Credential data store could not be loaded. '
+                        'Will ignore and overwrite.')
+            return
+
+        version = 0
+        try:
+            version = raw_data['file_version']
+        except Exception:
+            logger.warn('Missing version for credential data store. It may be '
+                        'corrupt or an old version. Overwriting.')
+        if version > 1:
+            raise NewerCredentialStoreError(
+                'Credential file has file_version of %d. '
+                'Only file_version of 1 is supported.' % version)
+
+        credentials = []
+        try:
+            credentials = raw_data['data']
+        except (TypeError, KeyError):
+            pass
+
+        for cred_entry in credentials:
+            try:
+                key, credential = self._decode_credential_from_json(cred_entry)
+                self._data[key] = credential
+            except:
+                # If something goes wrong loading a credential, just ignore it
+                logger.info('Error decoding credential, skipping',
+                            exc_info=True)
+
+    def _decode_credential_from_json(self, cred_entry):
+        """Load a credential from our JSON serialization.
+
+        Args:
+            cred_entry: A dict entry from the data member of our format
+
+        Returns:
+            (key, cred) where the key is the key tuple and the cred is the
+            OAuth2Credential object.
+        """
+        raw_key = cred_entry['key']
+        key = util.dict_to_tuple_key(raw_key)
+        credential = None
+        credential = Credentials.new_from_json(
+            json.dumps(cred_entry['credential']))
+        return (key, credential)
+
+    def _write(self):
+        """Write the cached data back out.
+
+        The multistore must be locked.
+        """
+        raw_data = {'file_version': 1}
+        raw_creds = []
+        raw_data['data'] = raw_creds
+        for (cred_key, cred) in self._data.items():
+            raw_key = dict(cred_key)
+            raw_cred = json.loads(cred.to_json())
+            raw_creds.append({'key': raw_key, 'credential': raw_cred})
+        self._locked_json_write(raw_data)
+
+    def _get_all_credential_keys(self):
+        """Gets all the registered credential keys in the multistore.
+
+        Returns:
+            A list of dictionaries corresponding to all the keys currently
+            registered
+        """
+        return [dict(key) for key in self._data.keys()]
+
+    def _get_credential(self, key):
+        """Get a credential from the multistore.
+
+        The multistore must be locked.
+
+        Args:
+            key: The key used to retrieve the credential
+
+        Returns:
+            The credential specified or None if not present
+        """
+        return self._data.get(key, None)
+
+    def _update_credential(self, key, cred):
+        """Update a credential and write the multistore.
+
+        This must be called when the multistore is locked.
+
+        Args:
+            key: The key used to retrieve the credential
+            cred: The OAuth2Credential to update/set
+        """
+        self._data[key] = cred
+        self._write()
+
+    def _delete_credential(self, key):
+        """Delete a credential and write the multistore.
+
+        This must be called when the multistore is locked.
+
+        Args:
+            key: The key used to retrieve the credential
+        """
+        try:
+            del self._data[key]
+        except KeyError:
+            pass
+        self._write()
+
+    def _get_storage(self, key):
+        """Get a Storage object to get/set a credential.
+
+        This Storage is a 'view' into the multistore.
+
+        Args:
+            key: The key used to retrieve the credential
+
+        Returns:
+            A Storage object that can be used to get/set this cred
+        """
+        return self._Storage(self, key)
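For reference, a minimal sketch of how this multistore is typically consumed once the
vendored package is importable as oauth2client; the filename, client id, user agent,
and scope below are illustrative placeholders only:

    from oauth2client import multistore_file

    # Hypothetical values, for illustration only.
    storage = multistore_file.get_credential_storage(
        '/tmp/example_credentials.json',
        'example-client-id',
        'example-agent/1.0',
        'https://www.googleapis.com/auth/userinfo.email')
    credentials = storage.get()  # None if nothing is stored under this key.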
diff --git a/utils/frozen_chromite/third_party/oauth2client/service_account.py b/utils/frozen_chromite/third_party/oauth2client/service_account.py
new file mode 100644
index 0000000..8d3dc65
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/service_account.py
@@ -0,0 +1,133 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A service account credentials class.
+
+This credentials class is implemented on top of the rsa library.
+"""
+
+import base64
+import time
+
+from pyasn1.codec.ber import decoder
+from pyasn1_modules.rfc5208 import PrivateKeyInfo
+import rsa
+
+from oauth2client import GOOGLE_REVOKE_URI
+from oauth2client import GOOGLE_TOKEN_URI
+from oauth2client._helpers import _json_encode
+from oauth2client._helpers import _to_bytes
+from oauth2client._helpers import _urlsafe_b64encode
+from oauth2client import util
+from oauth2client.client import AssertionCredentials
+
+
+class _ServiceAccountCredentials(AssertionCredentials):
+    """Class representing a service account (signed JWT) credential."""
+
+    MAX_TOKEN_LIFETIME_SECS = 3600  # 1 hour in seconds
+
+    def __init__(self, service_account_id, service_account_email,
+                 private_key_id, private_key_pkcs8_text, scopes,
+                 user_agent=None, token_uri=GOOGLE_TOKEN_URI,
+                 revoke_uri=GOOGLE_REVOKE_URI, **kwargs):
+
+        super(_ServiceAccountCredentials, self).__init__(
+            None, user_agent=user_agent, token_uri=token_uri,
+            revoke_uri=revoke_uri)
+
+        self._service_account_id = service_account_id
+        self._service_account_email = service_account_email
+        self._private_key_id = private_key_id
+        self._private_key = _get_private_key(private_key_pkcs8_text)
+        self._private_key_pkcs8_text = private_key_pkcs8_text
+        self._scopes = util.scopes_to_string(scopes)
+        self._user_agent = user_agent
+        self._token_uri = token_uri
+        self._revoke_uri = revoke_uri
+        self._kwargs = kwargs
+
+    def _generate_assertion(self):
+        """Generate the assertion that will be used in the request."""
+
+        header = {
+            'alg': 'RS256',
+            'typ': 'JWT',
+            'kid': self._private_key_id
+        }
+
+        now = int(time.time())
+        payload = {
+            'aud': self._token_uri,
+            'scope': self._scopes,
+            'iat': now,
+            'exp': now + _ServiceAccountCredentials.MAX_TOKEN_LIFETIME_SECS,
+            'iss': self._service_account_email
+        }
+        payload.update(self._kwargs)
+
+        first_segment = _urlsafe_b64encode(_json_encode(header))
+        second_segment = _urlsafe_b64encode(_json_encode(payload))
+        assertion_input = first_segment + b'.' + second_segment
+
+        # Sign the assertion.
+        rsa_bytes = rsa.pkcs1.sign(assertion_input, self._private_key,
+                                   'SHA-256')
+        signature = base64.urlsafe_b64encode(rsa_bytes).rstrip(b'=')
+
+        return assertion_input + b'.' + signature
+
+    def sign_blob(self, blob):
+        # Ensure that it is bytes
+        blob = _to_bytes(blob, encoding='utf-8')
+        return (self._private_key_id,
+                rsa.pkcs1.sign(blob, self._private_key, 'SHA-256'))
+
+    @property
+    def service_account_email(self):
+        return self._service_account_email
+
+    @property
+    def serialization_data(self):
+        return {
+            'type': 'service_account',
+            'client_id': self._service_account_id,
+            'client_email': self._service_account_email,
+            'private_key_id': self._private_key_id,
+            'private_key': self._private_key_pkcs8_text
+        }
+
+    def create_scoped_required(self):
+        return not self._scopes
+
+    def create_scoped(self, scopes):
+        return _ServiceAccountCredentials(self._service_account_id,
+                                          self._service_account_email,
+                                          self._private_key_id,
+                                          self._private_key_pkcs8_text,
+                                          scopes,
+                                          user_agent=self._user_agent,
+                                          token_uri=self._token_uri,
+                                          revoke_uri=self._revoke_uri,
+                                          **self._kwargs)
+
+
+def _get_private_key(private_key_pkcs8_text):
+    """Get an RSA private key object from a pkcs8 representation."""
+    private_key_pkcs8_text = _to_bytes(private_key_pkcs8_text)
+    der = rsa.pem.load_pem(private_key_pkcs8_text, 'PRIVATE KEY')
+    asn1_private_key, _ = decoder.decode(der, asn1Spec=PrivateKeyInfo())
+    return rsa.PrivateKey.load_pkcs1(
+        asn1_private_key.getComponentByName('privateKey').asOctets(),
+        format='DER')
diff --git a/utils/frozen_chromite/third_party/oauth2client/tools.py b/utils/frozen_chromite/third_party/oauth2client/tools.py
new file mode 100644
index 0000000..629866b
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/tools.py
@@ -0,0 +1,244 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Command-line tools for authenticating via OAuth 2.0
+
+Do the OAuth 2.0 Web Server dance for a command line application. Stores the
+generated credentials in a common file that is used by other example apps in
+the same directory.
+"""
+
+from __future__ import print_function
+
+import logging
+import socket
+import sys
+
+from six.moves import BaseHTTPServer
+from six.moves import urllib
+from six.moves import input
+
+from oauth2client import client
+from oauth2client import util
+
+
+__author__ = '[email protected] (Joe Gregorio)'
+__all__ = ['argparser', 'run_flow', 'message_if_missing']
+
+_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
+
+To make this sample run you will need to populate the client_secrets.json file
+found at:
+
+   %s
+
+with information from the APIs Console <https://code.google.com/apis/console>.
+
+"""
+
+
+def _CreateArgumentParser():
+    try:
+        import argparse
+    except ImportError:
+        return None
+    parser = argparse.ArgumentParser(add_help=False)
+    parser.add_argument('--auth_host_name', default='localhost',
+                        help='Hostname when running a local web server.')
+    parser.add_argument('--noauth_local_webserver', action='store_true',
+                        default=False, help='Do not run a local web server.')
+    parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
+                        nargs='*', help='Port web server should listen on.')
+    parser.add_argument(
+        '--logging_level', default='ERROR',
+        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
+        help='Set the logging level of detail.')
+    return parser
+
+# argparser is an ArgumentParser that contains command-line options expected
+# by tools.run(). Pass it in as part of the 'parents' argument to your own
+# ArgumentParser.
+argparser = _CreateArgumentParser()
+
+
+class ClientRedirectServer(BaseHTTPServer.HTTPServer):
+    """A server to handle OAuth 2.0 redirects back to localhost.
+
+    Waits for a single request and parses the query parameters
+    into query_params and then stops serving.
+    """
+    query_params = {}
+
+
+class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+    """A handler for OAuth 2.0 redirects back to localhost.
+
+    Waits for a single request and parses the query parameters
+    into the servers query_params and then stops serving.
+    """
+
+    def do_GET(self):
+        """Handle a GET request.
+
+        Parses the query parameters and prints a message
+        if the flow has completed. Note that we can't detect
+        if an error occurred.
+        """
+        self.send_response(200)
+        self.send_header("Content-type", "text/html")
+        self.end_headers()
+        query = self.path.split('?', 1)[-1]
+        query = dict(urllib.parse.parse_qsl(query))
+        self.server.query_params = query
+        self.wfile.write(
+            b"<html><head><title>Authentication Status</title></head>")
+        self.wfile.write(
+            b"<body><p>The authentication flow has completed.</p>")
+        self.wfile.write(b"</body></html>")
+
+    def log_message(self, format, *args):
+        """Do not log messages to stdout while running as cmd. line program."""
+
+
[email protected](3)
+def run_flow(flow, storage, flags, http=None):
+    """Core code for a command-line application.
+
+    The ``run()`` function is called from your application and runs
+    through all the steps to obtain credentials. It takes a ``Flow``
+    argument and attempts to open an authorization server page in the
+    user's default web browser. The server asks the user to grant your
+    application access to the user's data. If the user grants access,
+    the ``run()`` function returns new credentials. The new credentials
+    are also stored in the ``storage`` argument, which updates the file
+    associated with the ``Storage`` object.
+
+    It presumes it is run from a command-line application and supports the
+    following flags:
+
+        ``--auth_host_name`` (string, default: ``localhost``)
+           Host name to use when running a local web server to handle
+           redirects during OAuth authorization.
+
+        ``--auth_host_port`` (integer, default: ``[8080, 8090]``)
+           Port to use when running a local web server to handle redirects
+           during OAuth authorization. Repeat this option to specify a list
+           of values.
+
+        ``--[no]auth_local_webserver`` (boolean, default: ``True``)
+           Run a local web server to handle redirects during OAuth
+           authorization.
+
+    The tools module defines an ``ArgumentParser`` that already contains the
+    flag definitions that ``run()`` requires. You can pass that
+    ``ArgumentParser`` to your ``ArgumentParser`` constructor::
+
+        parser = argparse.ArgumentParser(
+            description=__doc__,
+            formatter_class=argparse.RawDescriptionHelpFormatter,
+            parents=[tools.argparser])
+        flags = parser.parse_args(argv)
+
+    Args:
+        flow: Flow, an OAuth 2.0 Flow to step through.
+        storage: Storage, a ``Storage`` to store the credential in.
+        flags: ``argparse.Namespace``, The command-line flags. This is the
+               object returned from calling ``parse_args()`` on
+               ``argparse.ArgumentParser`` as described above.
+        http: An instance of ``httplib2.Http.request`` or something that
+              acts like it.
+
+    Returns:
+        Credentials, the obtained credential.
+    """
+    logging.getLogger().setLevel(getattr(logging, flags.logging_level))
+    if not flags.noauth_local_webserver:
+        success = False
+        port_number = 0
+        for port in flags.auth_host_port:
+            port_number = port
+            try:
+                httpd = ClientRedirectServer((flags.auth_host_name, port),
+                                             ClientRedirectHandler)
+            except socket.error:
+                pass
+            else:
+                success = True
+                break
+        flags.noauth_local_webserver = not success
+        if not success:
+            print('Failed to start a local webserver listening '
+                  'on either port 8080')
+            print('or port 8090. Please check your firewall settings and locally')
+            print('running programs that may be blocking or using those ports.')
+            print()
+            print('Falling back to --noauth_local_webserver and continuing with')
+            print('authorization.')
+            print()
+
+    if not flags.noauth_local_webserver:
+        oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
+    else:
+        oauth_callback = client.OOB_CALLBACK_URN
+    flow.redirect_uri = oauth_callback
+    authorize_url = flow.step1_get_authorize_url()
+
+    if not flags.noauth_local_webserver:
+        import webbrowser
+        webbrowser.open(authorize_url, new=1, autoraise=True)
+        print('Your browser has been opened to visit:')
+        print()
+        print('    ' + authorize_url)
+        print()
+        print('If your browser is on a different machine then '
+              'exit and re-run this')
+        print('application with the command-line parameter ')
+        print()
+        print('  --noauth_local_webserver')
+        print()
+    else:
+        print('Go to the following link in your browser:')
+        print()
+        print('    ' + authorize_url)
+        print()
+
+    code = None
+    if not flags.noauth_local_webserver:
+        httpd.handle_request()
+        if 'error' in httpd.query_params:
+            sys.exit('Authentication request was rejected.')
+        if 'code' in httpd.query_params:
+            code = httpd.query_params['code']
+        else:
+            print('Failed to find "code" in the query parameters '
+                  'of the redirect.')
+            sys.exit('Try running with --noauth_local_webserver.')
+    else:
+        code = input('Enter verification code: ').strip()
+
+    try:
+        credential = flow.step2_exchange(code, http=http)
+    except client.FlowExchangeError as e:
+        sys.exit('Authentication has failed: %s' % e)
+
+    storage.put(credential)
+    credential.set_store(storage)
+    print('Authentication successful.')
+
+    return credential
+
+
+def message_if_missing(filename):
+    """Helpful message to display if the CLIENT_SECRETS file is missing."""
+    return _CLIENT_SECRETS_MESSAGE % filename
diff --git a/utils/frozen_chromite/third_party/oauth2client/util.py b/utils/frozen_chromite/third_party/oauth2client/util.py
new file mode 100644
index 0000000..1150e2b
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/util.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Common utility library."""
+
+import functools
+import inspect
+import logging
+
+import six
+from six.moves import urllib
+
+
+__author__ = [
+    '[email protected] (Rafe Kaplan)',
+    '[email protected] (Guido van Rossum)',
+]
+
+__all__ = [
+    'positional',
+    'POSITIONAL_WARNING',
+    'POSITIONAL_EXCEPTION',
+    'POSITIONAL_IGNORE',
+]
+
+logger = logging.getLogger(__name__)
+
+POSITIONAL_WARNING = 'WARNING'
+POSITIONAL_EXCEPTION = 'EXCEPTION'
+POSITIONAL_IGNORE = 'IGNORE'
+POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
+                            POSITIONAL_IGNORE])
+
+positional_parameters_enforcement = POSITIONAL_WARNING
+
+
+def positional(max_positional_args):
+    """A decorator to declare that only the first N arguments my be positional.
+
+    This decorator makes it easy to support Python 3 style keyword-only
+    parameters. For example, in Python 3 it is possible to write::
+
+        def fn(pos1, *, kwonly1=None, kwonly2=None):
+            ...
+
+    All named parameters after ``*`` must be a keyword::
+
+        fn(10, 'kw1', 'kw2')  # Raises exception.
+        fn(10, kwonly1='kw1')  # Ok.
+
+    Example
+    ^^^^^^^
+
+    To define a function like above, do::
+
+        @positional(1)
+        def fn(pos1, kwonly1=None, kwonly2=None):
+            ...
+
+    If no default value is provided to a keyword argument, it becomes a
+    required keyword argument::
+
+        @positional(0)
+        def fn(required_kw):
+            ...
+
+    This must be called with the keyword parameter::
+
+        fn()  # Raises exception.
+        fn(10)  # Raises exception.
+        fn(required_kw=10)  # Ok.
+
+    When defining instance or class methods always remember to account for
+    ``self`` and ``cls``::
+
+        class MyClass(object):
+
+            @positional(2)
+            def my_method(self, pos1, kwonly1=None):
+                ...
+
+            @classmethod
+            @positional(2)
+            def my_method(cls, pos1, kwonly1=None):
+                ...
+
+    The positional decorator behavior is controlled by
+    ``util.positional_parameters_enforcement``, which may be set to
+    ``POSITIONAL_EXCEPTION``, ``POSITIONAL_WARNING`` or
+    ``POSITIONAL_IGNORE`` to raise an exception, log a warning, or do
+    nothing, respectively, if a declaration is violated.
+
+    Args:
+        max_positional_args: Maximum number of positional arguments. All
+                             parameters after this index must be keyword
+                             only.
+
+    Returns:
+        A decorator that prevents using arguments after max_positional_args
+        from being used as positional parameters.
+
+    Raises:
+        TypeError: if a keyword-only argument is provided as a positional
+                   parameter, but only if
+                   util.positional_parameters_enforcement is set to
+                   POSITIONAL_EXCEPTION.
+    """
+
+    def positional_decorator(wrapped):
+        @functools.wraps(wrapped)
+        def positional_wrapper(*args, **kwargs):
+            if len(args) > max_positional_args:
+                plural_s = ''
+                if max_positional_args != 1:
+                    plural_s = 's'
+                message = ('%s() takes at most %d positional '
+                           'argument%s (%d given)' % (
+                               wrapped.__name__, max_positional_args,
+                               plural_s, len(args)))
+                if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
+                    raise TypeError(message)
+                elif positional_parameters_enforcement == POSITIONAL_WARNING:
+                    logger.warning(message)
+                else:  # IGNORE
+                    pass
+            return wrapped(*args, **kwargs)
+        return positional_wrapper
+
+    if isinstance(max_positional_args, six.integer_types):
+        return positional_decorator
+    else:
+        args, _, _, defaults = inspect.getargspec(max_positional_args)
+        return positional(len(args) - len(defaults))(max_positional_args)
+
+
+def scopes_to_string(scopes):
+    """Converts scope value to a string.
+
+    If scopes is a string then it is simply passed through. If scopes is an
+    iterable then a string is returned that is all the individual scopes
+    concatenated with spaces.
+
+    Args:
+        scopes: string or iterable of strings, the scopes.
+
+    Returns:
+        The scopes formatted as a single string.
+    """
+    if isinstance(scopes, six.string_types):
+        return scopes
+    else:
+        return ' '.join(scopes)
+
+
+def string_to_scopes(scopes):
+    """Converts stringifed scope value to a list.
+
+    If scopes is a list then it is simply passed through. If scopes is a
+    string then a list of each individual scope is returned.
+
+    Args:
+        scopes: a string or iterable of strings, the scopes.
+
+    Returns:
+        The scopes in a list.
+    """
+    if not scopes:
+        return []
+    if isinstance(scopes, six.string_types):
+        return scopes.split(' ')
+    else:
+        return scopes
+
+
+def dict_to_tuple_key(dictionary):
+    """Converts a dictionary to a tuple that can be used as an immutable key.
+
+    The resulting key is always sorted so that logically equivalent
+    dictionaries always produce an identical tuple for a key.
+
+    Args:
+        dictionary: the dictionary to use as the key.
+
+    Returns:
+        A tuple representing the dictionary in its natural sorted ordering.
+    """
+    return tuple(sorted(dictionary.items()))
+
+
+def _add_query_parameter(url, name, value):
+    """Adds a query parameter to a url.
+
+    Replaces the current value if it already exists in the URL.
+
+    Args:
+        url: string, url to add the query parameter to.
+        name: string, query parameter name.
+        value: string, query parameter value.
+
+    Returns:
+        The updated URL. The URL is returned unchanged if value is None.
+    """
+    if value is None:
+        return url
+    else:
+        parsed = list(urllib.parse.urlparse(url))
+        q = dict(urllib.parse.parse_qsl(parsed[4]))
+        q[name] = value
+        parsed[4] = urllib.parse.urlencode(q)
+        return urllib.parse.urlunparse(parsed)
diff --git a/utils/frozen_chromite/third_party/oauth2client/xsrfutil.py b/utils/frozen_chromite/third_party/oauth2client/xsrfutil.py
new file mode 100644
index 0000000..10bbe3f
--- /dev/null
+++ b/utils/frozen_chromite/third_party/oauth2client/xsrfutil.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2014 the Melange authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper methods for creating & verifying XSRF tokens."""
+
+import base64
+import binascii
+import hmac
+import time
+
+from oauth2client._helpers import _to_bytes
+from oauth2client import util
+
+__authors__ = [
+    '"Doug Coker" <[email protected]>',
+    '"Joe Gregorio" <[email protected]>',
+]
+
+# Delimiter character
+DELIMITER = b':'
+
+# 1 hour in seconds
+DEFAULT_TIMEOUT_SECS = 60 * 60
+
+
[email protected](2)
+def generate_token(key, user_id, action_id='', when=None):
+    """Generates a URL-safe token for the given user, action, time tuple.
+
+    Args:
+        key: secret key to use.
+        user_id: the user ID of the authenticated user.
+        action_id: a string identifier of the action they requested
+                   authorization for.
+        when: the time in seconds since the epoch at which the user was
+              authorized for this action. If not set, the current time is used.
+
+    Returns:
+        A string XSRF protection token.
+    """
+    digester = hmac.new(_to_bytes(key, encoding='utf-8'))
+    digester.update(_to_bytes(str(user_id), encoding='utf-8'))
+    digester.update(DELIMITER)
+    digester.update(_to_bytes(action_id, encoding='utf-8'))
+    digester.update(DELIMITER)
+    when = _to_bytes(str(when or int(time.time())), encoding='utf-8')
+    digester.update(when)
+    digest = digester.digest()
+
+    token = base64.urlsafe_b64encode(digest + DELIMITER + when)
+    return token
+
+
[email protected](3)
+def validate_token(key, token, user_id, action_id="", current_time=None):
+    """Validates that the given token authorizes the user for the action.
+
+    Tokens are invalid if the time of issue is too old or if the token
+    does not match what generate_token outputs (i.e. the token was forged).
+
+    Args:
+        key: secret key to use.
+        token: a string of the token generated by generate_token.
+        user_id: the user ID of the authenticated user.
+        action_id: a string identifier of the action they requested
+                   authorization for.
+
+    Returns:
+        A boolean - True if the user is authorized for the action, False
+        otherwise.
+    """
+    if not token:
+        return False
+    try:
+        decoded = base64.urlsafe_b64decode(token)
+        token_time = int(decoded.split(DELIMITER)[-1])
+    except (TypeError, ValueError, binascii.Error):
+        return False
+    if current_time is None:
+        current_time = time.time()
+    # If the token is too old it's not valid.
+    if current_time - token_time > DEFAULT_TIMEOUT_SECS:
+        return False
+
+    # The given token should match the generated one with the same time.
+    expected_token = generate_token(key, user_id, action_id=action_id,
+                                    when=token_time)
+    if len(token) != len(expected_token):
+        return False
+
+    # Perform constant time comparison to avoid timing attacks
+    different = 0
+    for x, y in zip(bytearray(token), bytearray(expected_token)):
+        different |= x ^ y
+    return not different
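A minimal sketch of the token round trip these helpers provide; the secret key,
user id, and action id below are placeholders for illustration only:

    from oauth2client import xsrfutil

    # Placeholder key and ids, for illustration only.
    token = xsrfutil.generate_token('example-secret-key', 42, action_id='delete')
    assert xsrfutil.validate_token('example-secret-key', token, 42,
                                   action_id='delete')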
diff --git a/utils/frozen_chromite/third_party/uritemplate/__init__.py b/utils/frozen_chromite/third_party/uritemplate/__init__.py
new file mode 100644
index 0000000..0e7f415
--- /dev/null
+++ b/utils/frozen_chromite/third_party/uritemplate/__init__.py
@@ -0,0 +1,265 @@
+#!/usr/bin/env python
+
+"""
+URI Template (RFC6570) Processor
+"""
+
+__copyright__ = """\
+Copyright 2011-2013 Joe Gregorio
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import re
+try:
+    from urllib.parse import quote
+except ImportError:
+   from urllib import quote
+
+
+
+__version__ = "0.6"
+
+RESERVED = ":/?#[]@!$&'()*+,;="
+OPERATOR = "+#./;?&|!@"
+MODIFIER = ":^"
+TEMPLATE = re.compile(r"{([^\}]+)}")
+
+
+def variables(template):
+    '''Returns the set of variable names used in a URI template.'''
+    vars = set()
+    for varlist in TEMPLATE.findall(template):
+        if varlist[0] in OPERATOR:
+            varlist = varlist[1:]
+        varspecs = varlist.split(',')
+        for var in varspecs:
+            # handle prefix values
+            var = var.split(':')[0]
+            # handle composite values
+            if var.endswith('*'):
+                var = var[:-1]
+            vars.add(var)
+    return vars
+
+
+def _quote(value, safe, prefix=None):
+    if prefix is not None:
+        return quote(str(value)[:prefix], safe)
+    return quote(str(value), safe)
+
+
+def _tostring(varname, value, explode, prefix, operator, safe=""):
+    if isinstance(value, list):
+        return ",".join([_quote(x, safe) for x in value])
+    if isinstance(value, dict):
+        keys = sorted(value.keys())
+        if explode:
+            return ",".join([_quote(key, safe) + "=" + \
+                             _quote(value[key], safe) for key in keys])
+        else:
+            return ",".join([_quote(key, safe) + "," + \
+                             _quote(value[key], safe) for key in keys])
+    elif value is None:
+        return
+    else:
+        return _quote(value, safe, prefix)
+
+
+def _tostring_path(varname, value, explode, prefix, operator, safe=""):
+    joiner = operator
+    if isinstance(value, list):
+        if explode:
+            # Skip None entries in the value list.
+            out = [_quote(x, safe) for x in value if x is not None]
+        else:
+            joiner = ","
+            out = [_quote(x, safe) for x in value if x is not None]
+        if out:
+            return joiner.join(out)
+        else:
+            return
+    elif isinstance(value, dict):
+        keys = sorted(value.keys())
+        if explode:
+            out = [_quote(key, safe) + "=" + \
+                   _quote(value[key], safe) for key in keys \
+                   if value[key] is not None]
+        else:
+            joiner = ","
+            out = [_quote(key, safe) + "," + \
+                   _quote(value[key], safe) \
+                   for key in keys if value[key] is not None]
+        if out:
+            return joiner.join(out)
+        else:
+            return
+    elif value is None:
+        return
+    else:
+        return _quote(value, safe, prefix)
+
+
+def _tostring_semi(varname, value, explode, prefix, operator, safe=""):
+    joiner = operator
+    if operator == "?":
+        joiner = "&"
+    if isinstance(value, list):
+        if explode:
+            out = [varname + "=" + _quote(x, safe) \
+                   for x in value if x is not None]
+            if out:
+                return joiner.join(out)
+            else:
+                return
+        else:
+            return varname + "=" + ",".join([_quote(x, safe) \
+                                             for x in value])
+    elif isinstance(value, dict):
+        keys = sorted(value.keys())
+        if explode:
+            return joiner.join([_quote(key, safe) + "=" + \
+                                _quote(value[key], safe) \
+                                for key in keys if key is not None])
+        else:
+            return varname + "=" + ",".join([_quote(key, safe) + "," + \
+                             _quote(value[key], safe) for key in keys \
+                             if key is not None])
+    else:
+        if value is None:
+            return
+        elif value:
+            return (varname + "=" + _quote(value, safe, prefix))
+        else:
+            return varname
+
+
+def _tostring_query(varname, value, explode, prefix, operator, safe=""):
+    joiner = operator
+    if operator in ["?", "&"]:
+        joiner = "&"
+    if isinstance(value, list):
+        if 0 == len(value):
+            return None
+        if explode:
+            return joiner.join([varname + "=" + _quote(x, safe) \
+                                for x in value])
+        else:
+            return (varname + "=" + ",".join([_quote(x, safe) \
+                                             for x in value]))
+    elif isinstance(value, dict):
+        if 0 == len(value):
+            return None
+        keys = sorted(value.keys())
+        if explode:
+            return joiner.join([_quote(key, safe) + "=" + \
+                                _quote(value[key], safe) \
+                                for key in keys])
+        else:
+            return varname + "=" + \
+                   ",".join([_quote(key, safe) + "," + \
+                             _quote(value[key], safe) for key in keys])
+    else:
+        if value is None:
+            return
+        elif value:
+            return (varname + "=" + _quote(value, safe, prefix))
+        else:
+            return (varname + "=")
+
+
+TOSTRING = {
+    "" : _tostring,
+    "+": _tostring,
+    "#": _tostring,
+    ";": _tostring_semi,
+    "?": _tostring_query,
+    "&": _tostring_query,
+    "/": _tostring_path,
+    ".": _tostring_path,
+    }
+
+
+def expand(template, variables):
+    """
+    Expand template as a URI Template using variables.
+    """
+    def _sub(match):
+        expression = match.group(1)
+        operator = ""
+        if expression[0] in OPERATOR:
+            operator = expression[0]
+            varlist = expression[1:]
+        else:
+            varlist = expression
+
+        safe = ""
+        if operator in ["+", "#"]:
+            safe = RESERVED
+        varspecs = varlist.split(",")
+        varnames = []
+        defaults = {}
+        for varspec in varspecs:
+            default = None
+            explode = False
+            prefix = None
+            if "=" in varspec:
+                varname, default = tuple(varspec.split("=", 1))
+            else:
+                varname = varspec
+            if varname[-1] == "*":
+                explode = True
+                varname = varname[:-1]
+            elif ":" in varname:
+                try:
+                    prefix = int(varname[varname.index(":")+1:])
+                except ValueError:
+                    raise ValueError("non-integer prefix '{0}'".format(
+                       varname[varname.index(":")+1:]))
+                varname = varname[:varname.index(":")]
+            if default:
+                defaults[varname] = default
+            varnames.append((varname, explode, prefix))
+
+        retval = []
+        joiner = operator
+        start = operator
+        if operator == "+":
+            start = ""
+            joiner = ","
+        if operator == "#":
+            joiner = ","
+        if operator == "?":
+            joiner = "&"
+        if operator == "&":
+            start = "&"
+        if operator == "":
+            joiner = ","
+        for varname, explode, prefix in varnames:
+            if varname in variables:
+                value = variables[varname]
+                if not value and value != "" and varname in defaults:
+                    value = defaults[varname]
+            elif varname in defaults:
+                value = defaults[varname]
+            else:
+                continue
+            expanded = TOSTRING[operator](
+              varname, value, explode, prefix, operator, safe=safe)
+            if expanded is not None:
+                retval.append(expanded)
+        if len(retval) > 0:
+            return start + joiner.join(retval)
+        else:
+            return ""
+
+    return TEMPLATE.sub(_sub, template)
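A hedged sketch of expand() and variables() above; the import path and the
template values are illustrative assumptions:

    from autotest_lib.utils.frozen_chromite.third_party import uritemplate

    url = uritemplate.expand('https://example.com/search{?q,lang}',
                             {'q': 'chrome os', 'lang': 'en'})
    # url == 'https://example.com/search?q=chrome%20os&lang=en'

    names = uritemplate.variables('https://example.com/{user}/repos{?page}')
    # names == set(['user', 'page'])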
diff --git a/utils/frozen_chromite/utils/__init__.py b/utils/frozen_chromite/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/utils/frozen_chromite/utils/__init__.py
diff --git a/utils/frozen_chromite/utils/attrs_freezer.py b/utils/frozen_chromite/utils/attrs_freezer.py
new file mode 100644
index 0000000..05fb960
--- /dev/null
+++ b/utils/frozen_chromite/utils/attrs_freezer.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Helpers for constructing objects with frozen attributes."""
+
+from __future__ import print_function
+
+import types
+
+import six
+
+
+class Error(Exception):
+  """Raised when frozen attribute value is modified."""
+
+
+class Class(type):
+  """Metaclass for any class to support freezing attribute values.
+
+  This metaclass can be used by any class to add the ability to
+  freeze attribute values with the Freeze method.
+
+  Use by adding this line before a class:
+    @six.add_metaclass(attrs_freezer.Class)
+  """
+  _FROZEN_ERR_MSG = 'Attribute values are frozen, cannot alter %s.'
+
+  def __new__(cls, clsname, bases, scope):
+    # Create Freeze method that freezes current attributes.
+    if 'Freeze' in scope:
+      raise TypeError('Class %s has its own Freeze method, cannot use with'
+                      ' the attrs_freezer.Class metaclass.' % clsname)
+
+    # Make sure cls will have _FROZEN_ERR_MSG set.
+    scope.setdefault('_FROZEN_ERR_MSG', cls._FROZEN_ERR_MSG)
+
+    # Create the class.
+    # pylint: disable=bad-super-call
+    newcls = super(Class, cls).__new__(cls, clsname, bases, scope)
+
+    # Replace cls.__setattr__ with the one that honors freezing.
+    orig_setattr = newcls.__setattr__
+
+    def SetAttr(obj, name, value):
+      """If the object is frozen then abort."""
+      # pylint: disable=protected-access
+      if getattr(obj, '_frozen', False):
+        raise Error(obj._FROZEN_ERR_MSG % name)
+      if isinstance(orig_setattr, types.MethodType):
+        orig_setattr(obj, name, value)
+      else:
+        super(newcls, obj).__setattr__(name, value)
+    newcls.__setattr__ = SetAttr
+
+    # Add new newcls.Freeze method.
+    def Freeze(obj):
+      # pylint: disable=protected-access
+      obj._frozen = True
+    newcls.Freeze = Freeze
+
+    return newcls
+
+
[email protected]_metaclass(Class)
+class Mixin(object):
+  """Alternate mechanism for freezing attributes in a class.
+
+  If an existing class is not a new-style class then it will be unable to
+  use the attrs_freezer.Class metaclass directly.  Simply use this class
+  as a mixin instead to accomplish the same thing.
+  """
diff --git a/utils/frozen_chromite/utils/key_value_store.py b/utils/frozen_chromite/utils/key_value_store.py
new file mode 100644
index 0000000..0c0e8bf
--- /dev/null
+++ b/utils/frozen_chromite/utils/key_value_store.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Common python commands used by various build scripts."""
+
+from __future__ import print_function
+
+import contextlib
+import errno
+
+import six
+
+
[email protected]
+def _Open(obj, mode='r'):
+  """Convenience ctx that accepts a file path or an already open file object."""
+  if isinstance(obj, six.string_types):
+    with open(obj, mode=mode) as f:
+      yield f
+  else:
+    yield obj
+
+
+def LoadData(data, multiline=False, source='<data>'):
+  """Turn key=value content into a dict
+
+  Note: If you're designing a new data store, please use json rather than
+  this format.  This func is designed to work with legacy/external files
+  where json isn't an option.
+
+  Only UTF-8 content is supported currently.
+
+  Args:
+    data: The data to parse.
+    multiline: Allow a value enclosed by quotes to span multiple lines.
+    source: Helpful string for users to diagnose source of errors.
+
+  Returns:
+    a dict of all the key=value pairs found in the file.
+  """
+  d = {}
+
+  key = None
+  in_quotes = None
+  for raw_line in data.splitlines(True):
+    line = raw_line.split('#')[0]
+    if not line.strip():
+      continue
+
+    # Continue processing a multiline value.
+    if multiline and in_quotes and key:
+      if line.rstrip()[-1] == in_quotes:
+        # Wrap up the multiline value if the line ends with a quote.
+        d[key] += line.rstrip()[:-1]
+        in_quotes = None
+      else:
+        d[key] += line
+      continue
+
+    chunks = line.split('=', 1)
+    if len(chunks) != 2:
+      raise ValueError('Malformed key=value file %r; line %r'
+                       % (source, raw_line))
+    key = chunks[0].strip()
+    val = chunks[1].strip()
+    if len(val) >= 2 and val[0] in '"\'' and val[0] == val[-1]:
+      # Strip matching quotes on the same line.
+      val = val[1:-1]
+    elif val and multiline and val[0] in '"\'':
+      # Unmatched quote here indicates a multiline value. Do not
+      # strip the '\n' at the end of the line.
+      in_quotes = val[0]
+      val = chunks[1].lstrip()[1:]
+    d[key] = val
+
+  return d
+
+
+def LoadFile(obj, ignore_missing=False, multiline=False):
+  """Turn a key=value file into a dict
+
+  Note: If you're designing a new data store, please use json rather than
+  this format.  This func is designed to work with legacy/external files
+  where json isn't an option.
+
+  Only UTF-8 content is supported currently.
+
+  Args:
+    obj: The file to read.  Can be a path or an open file object.
+    ignore_missing: If the file does not exist, return an empty dict.
+    multiline: Allow a value enclosed by quotes to span multiple lines.
+
+  Returns:
+    a dict of all the key=value pairs found in the file.
+  """
+  try:
+    with _Open(obj) as f:
+      return LoadData(f.read(), multiline=multiline, source=obj)
+  except EnvironmentError as e:
+    if not (ignore_missing and e.errno == errno.ENOENT):
+      raise
+
+  return {}
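A hedged sketch of the parsers above against lsb-release style content; the
import path and the key/value pairs are illustrative:

  from autotest_lib.utils.frozen_chromite.utils import key_value_store

  data = ('CHROMEOS_RELEASE_BOARD=eve\n'
          'CHROMEOS_RELEASE_VERSION="12345.0.0"  # comment is stripped\n')
  key_value_store.LoadData(data)
  # -> {'CHROMEOS_RELEASE_BOARD': 'eve',
  #     'CHROMEOS_RELEASE_VERSION': '12345.0.0'}

  # Missing files can be tolerated instead of raising:
  key_value_store.LoadFile('/nonexistent/lsb-release', ignore_missing=True)
  # -> {}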
diff --git a/utils/frozen_chromite/utils/memoize.py b/utils/frozen_chromite/utils/memoize.py
new file mode 100644
index 0000000..79d61f8
--- /dev/null
+++ b/utils/frozen_chromite/utils/memoize.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Functions for automatic caching of expensive function calls."""
+
+from __future__ import print_function
+
+import functools
+import sys
+
+import six
+
+
+def MemoizedSingleCall(functor):
+  """Decorator for simple functor targets, caching the results
+
+  The functor must accept no arguments beyond either a class or self (depending
+  on if this is used in a classmethod/instancemethod context).  Results of the
+  wrapped method will be written to the class/instance namespace in a specially
+  named cached value.  All future invocations will just reuse that value.
+
+  Note that this cache is per-process, so sibling and parent processes won't
+  notice updates to the cache.
+  """
+  # TODO(build): Should we rebase to snakeoil.klass.cached* functionality?
+  # pylint: disable=protected-access
+  @functools.wraps(functor)
+  def wrapper(obj):
+    key = wrapper._cache_key
+    val = getattr(obj, key, None)
+    if val is None:
+      val = functor(obj)
+      setattr(obj, key, val)
+    return val
+
+  # Use name mangling to store the cached value in a (hopefully) unique place.
+  wrapper._cache_key = '_%s_cached' % (functor.__name__.lstrip('_'),)
+  return wrapper
+
+
+def Memoize(f):
+  """Decorator for memoizing a function.
+
+  Caches all calls to the function using a ._memo_cache dict mapping (args,
+  kwargs) to the results of the first function call with those args and kwargs.
+
+  If any of args or kwargs are not hashable, trying to use them as a dict key
+  will raise a TypeError.
+
+  Note that this cache is per-process, so sibling and parent processes won't
+  notice updates to the cache.
+  """
+  # pylint: disable=protected-access
+  f._memo_cache = {}
+
+  @functools.wraps(f)
+  def wrapper(*args, **kwargs):
+    # Make sure that the key is hashable... as long as the contents of args and
+    # kwargs are hashable.
+    # TODO(phobbs) we could add an option to use the id(...) of an object if
+    # it's not hashable.  Then "MemoizedSingleCall" would be obsolete.
+    key = (tuple(args), tuple(sorted(kwargs.items())))
+    if key in f._memo_cache:
+      return f._memo_cache[key]
+
+    result = f(*args, **kwargs)
+    f._memo_cache[key] = result
+    return result
+
+  return wrapper
+
+
+def SafeRun(functors, combine_exceptions=False):
+  """Executes a list of functors, continuing on exceptions.
+
+  Args:
+    functors: An iterable of functors to call.
+    combine_exceptions: If set, and multiple exceptions are encountered,
+      SafeRun will raise a RuntimeError containing a list of all the exceptions.
+      If only one exception is encountered, then the default behavior of
+      re-raising the original exception with unmodified stack trace will be
+      kept.
+
+  Raises:
+    The first exception encountered, with corresponding backtrace, unless
+    |combine_exceptions| is specified and there is more than one exception
+    encountered, in which case a RuntimeError containing a list of all the
+    exceptions that were encountered is raised.
+  """
+  errors = []
+
+  for f in functors:
+    try:
+      f()
+    except Exception as e:
+      # Append the exception object and the traceback.
+      errors.append((e, sys.exc_info()[2]))
+
+  if errors:
+    if len(errors) == 1 or not combine_exceptions:
+      # To preserve the traceback.
+      inst, tb = errors[0]
+      six.reraise(inst, None, tb)
+    else:
+      raise RuntimeError([e[0] for e in errors])
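A brief, hedged sketch of the three helpers above; the import path, function
names, and values are illustrative:

  from autotest_lib.utils.frozen_chromite.utils import memoize

  @memoize.Memoize
  def ListBoards(channel='stable'):
    return ['eve', 'kevin']            # Imagine an expensive lookup here.

  ListBoards()                         # Computed and cached.
  ListBoards()                         # Served from the per-process cache.

  class Build(object):
    @memoize.MemoizedSingleCall
    def Manifest(self):
      return {'version': '12345.0.0'}  # Evaluated once per instance.

  # SafeRun keeps invoking the remaining functors even if one raises, then
  # re-raises the first exception (or a combined RuntimeError) afterwards.
  memoize.SafeRun([lambda: None, lambda: None], combine_exceptions=True)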
diff --git a/utils/frozen_chromite/utils/outcap.py b/utils/frozen_chromite/utils/outcap.py
new file mode 100644
index 0000000..0fdea3f
--- /dev/null
+++ b/utils/frozen_chromite/utils/outcap.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Chromium OS Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tools for capturing program output at a low level.
+
+Mostly useful for capturing stdout/stderr as directly assigning to those
+variables won't work everywhere.
+"""
+
+from __future__ import print_function
+
+import os
+import re
+import sys
+import tempfile
+
+
+class _FdCapturer(object):
+  """Helper class to capture output at the file descriptor level.
+
+  This is meant to be used with sys.stdout or sys.stderr. By capturing
+  file descriptors, this will also intercept subprocess output, which
+  reassigning sys.stdout or sys.stderr will not do.
+
+  While the capturer is active, output is only captured; it is no longer
+  printed to the original stream.
+  """
+
+  def __init__(self, source, output=None):
+    """Construct the _FdCapturer object.
+
+    Does not start capturing until Start() is called.
+
+    Args:
+      source: A file object to capture. Typically sys.stdout or
+        sys.stderr, but will work with anything that implements flush()
+        and fileno().
+      output: A file name where the captured output is to be stored. If None,
+        then the output will be stored to a temporary file.
+    """
+    self._source = source
+    self._captured = ''
+    self._saved_fd = None
+    self._tempfile = None
+    self._capturefile = None
+    self._capturefile_reader = None
+    self._capturefile_name = output
+
+  def _SafeCreateTempfile(self, tempfile_obj):
+    """Ensure that the tempfile is created safely.
+
+    (1) Stash away a reference to the tempfile.
+    (2) Unlink the file from the filesystem.
+
+    (2) ensures that if we crash, the file gets deleted. (1) ensures that while
+    we are running, we hold a reference to the file so the system does not close
+    the file.
+
+    Args:
+      tempfile_obj: A tempfile object.
+    """
+    self._tempfile = tempfile_obj
+    os.unlink(tempfile_obj.name)
+
+  def Start(self):
+    """Begin capturing output."""
+    if self._capturefile_name is None:
+      tempfile_obj = tempfile.NamedTemporaryFile(delete=False)
+      self._capturefile = tempfile_obj.file
+      self._capturefile_name = tempfile_obj.name
+      self._capturefile_reader = open(self._capturefile_name)
+      self._SafeCreateTempfile(tempfile_obj)
+    else:
+      # Open file passed in for writing. Set buffering=1 for line level
+      # buffering.
+      self._capturefile = open(self._capturefile_name, 'w', buffering=1)
+      self._capturefile_reader = open(self._capturefile_name)
+    # Save the original fd so we can revert in Stop().
+    self._saved_fd = os.dup(self._source.fileno())
+    os.dup2(self._capturefile.fileno(), self._source.fileno())
+
+  def Stop(self):
+    """Stop capturing output."""
+    self.GetCaptured()
+    if self._saved_fd is not None:
+      os.dup2(self._saved_fd, self._source.fileno())
+      os.close(self._saved_fd)
+      self._saved_fd = None
+    # If capturefile and capturefile_reader exist, close them as they were
+    # opened in self.Start().
+    if self._capturefile_reader is not None:
+      self._capturefile_reader.close()
+      self._capturefile_reader = None
+    if self._capturefile is not None:
+      self._capturefile.close()
+      self._capturefile = None
+
+  def GetCaptured(self):
+    """Return all output captured up to this point.
+
+    Can be used while capturing or after Stop() has been called.
+    """
+    self._source.flush()
+    if self._capturefile_reader is not None:
+      self._captured += self._capturefile_reader.read()
+    return self._captured
+
+  def ClearCaptured(self):
+    """Erase all captured output."""
+    self.GetCaptured()
+    self._captured = ''
+
+
+class OutputCapturer(object):
+  """Class for capturing stdout/stderr output.
+
+  Class is designed as a 'ContextManager'.
+
+  Examples:
+    with outcap.OutputCapturer() as output:
+      # Capturing of stdout/stderr automatically starts now.
+      # Do stuff that sends output to stdout/stderr.
+      # Capturing automatically stops at end of 'with' block.
+
+    # stdout/stderr can be retrieved from the OutputCapturer object:
+    stdout = output.GetStdoutLines() # Or other access methods
+
+    # Some Assert methods are only valid if capturing was used in test.
+    self.AssertOutputContainsError() # Or other related methods
+
+    # OutputCapturer can also be used to capture output to specified files.
+    with outcap.OutputCapturer(stdout_path='/tmp/stdout.txt') as output:
+      # Do stuff.
+      # stdout will be captured to /tmp/stdout.txt.
+  """
+
+  OPER_MSG_SPLIT_RE = re.compile(r'^\033\[1;.*?\033\[0m$|^[^\n]*$',
+                                 re.DOTALL | re.MULTILINE)
+
+  __slots__ = ['_stdout_capturer', '_stderr_capturer', '_quiet_fail']
+
+  def __init__(self, stdout_path=None, stderr_path=None, quiet_fail=False):
+    """Initalize OutputCapturer with capture files.
+
+    If OutputCapturer is initialized with filenames to capture stdout and stderr
+    to, then those files are used. Otherwise, temporary files are created.
+
+    Args:
+      stdout_path: File to capture stdout to. If None, a temporary file is used.
+      stderr_path: File to capture stderr to. If None, a temporary file is used.
+      quiet_fail: If True fail quietly without printing the captured stdout and
+        stderr.
+    """
+    self._stdout_capturer = _FdCapturer(sys.stdout, output=stdout_path)
+    self._stderr_capturer = _FdCapturer(sys.stderr, output=stderr_path)
+    self._quiet_fail = quiet_fail
+
+  def __enter__(self):
+    # This method is called when entering the 'with' block.
+    self.StartCapturing()
+    return self
+
+  def __exit__(self, exc_type, exc_val, exc_tb):
+    # This method is called when exiting the 'with' block.
+    self.StopCapturing()
+
+    if exc_type and not self._quiet_fail:
+      print('Exception during output capturing: %r' % (exc_val,))
+      stdout = self.GetStdout()
+      if stdout:
+        print('Captured stdout was:\n%s' % stdout)
+      else:
+        print('No captured stdout')
+      stderr = self.GetStderr()
+      if stderr:
+        print('Captured stderr was:\n%s' % stderr)
+      else:
+        print('No captured stderr')
+
+  def StartCapturing(self):
+    """Begin capturing stdout and stderr."""
+    self._stdout_capturer.Start()
+    self._stderr_capturer.Start()
+
+  def StopCapturing(self):
+    """Stop capturing stdout and stderr."""
+    self._stdout_capturer.Stop()
+    self._stderr_capturer.Stop()
+
+  def ClearCaptured(self):
+    """Clear any captured stdout/stderr content."""
+    self._stdout_capturer.ClearCaptured()
+    self._stderr_capturer.ClearCaptured()
+
+  def GetStdout(self):
+    """Return captured stdout so far."""
+    return self._stdout_capturer.GetCaptured()
+
+  def GetStderr(self):
+    """Return captured stderr so far."""
+    return self._stderr_capturer.GetCaptured()
+
+  def _GetOutputLines(self, output, include_empties):
+    """Split |output| into lines, optionally |include_empties|.
+
+    Return array of lines.
+    """
+
+    lines = self.OPER_MSG_SPLIT_RE.findall(output)
+    if not include_empties:
+      lines = [ln for ln in lines if ln]
+
+    return lines
+
+  def GetStdoutLines(self, include_empties=True):
+    """Return captured stdout so far as array of lines.
+
+    If |include_empties| is false filter out all empty lines.
+    """
+    return self._GetOutputLines(self.GetStdout(), include_empties)
+
+  def GetStderrLines(self, include_empties=True):
+    """Return captured stderr so far as array of lines.
+
+    If |include_empties| is false filter out all empty lines.
+    """
+    return self._GetOutputLines(self.GetStderr(), include_empties)
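Finally, a hedged usage sketch of OutputCapturer above; the import path and the
captured commands are illustrative:

  import subprocess

  from autotest_lib.utils.frozen_chromite.utils import outcap

  with outcap.OutputCapturer() as cap:
    print('captured, not shown on the terminal')
    subprocess.call(['echo', 'fd-level capture catches subprocess output too'])

  stdout = cap.GetStdout()
  lines = cap.GetStdoutLines(include_empties=False)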