update_engine: Update libchrome APIs to r369476

The new libchrome has been ported from Chromium and some of its APIs
have changed. Update the call sites accordingly.
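
For reference, the old-to-new mapping is collected in one place below.
This is an illustrative sketch only, derived from the call sites in this
patch; the header paths and the wrapper function name are assumptions and
the snippet is not compiled as part of the change.

  // Illustrative only: the new-style libchrome r369476 calls used in this
  // patch, gathered for reference.
  #include <sys/types.h>

  #include <cstdint>
  #include <limits>
  #include <string>
  #include <vector>

  #include <base/strings/string_split.h>
  #include <base/strings/string_util.h>

  void LibchromeR369476Examples(const std::string& url) {
    // StartsWithASCII(str, prefix, case_sensitive) and the bool-taking
    // EndsWith() are replaced by overloads taking base::CompareCase.
    bool is_https = base::StartsWith(url, "https://",
                                     base::CompareCase::INSENSITIVE_ASCII);
    // StringToLowerASCII() is now ToLowerASCII() and returns a copy.
    std::string lower = base::ToLowerASCII(url);
    // IsAsciiWhitespace()/IsAsciiAlpha()/IsAsciiDigit() moved into base::.
    bool leading_ws = !url.empty() && base::IsAsciiWhitespace(url[0]);
    // SplitString() now takes string delimiters plus whitespace/result
    // policies and returns the pieces; JoinString() takes a string separator.
    std::vector<std::string> parts = base::SplitString(
        url, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
    std::string joined = base::JoinString(parts, ":");
    // COMPILE_ASSERT() is gone; use C++11 static_assert().
    static_assert(sizeof(off_t) == 8, "off_t not 64 bit");
    // kint64max/kuint64max are gone; use std::numeric_limits.
    int64_t max64 = std::numeric_limits<int64_t>::max();
    (void)is_https; (void)lower; (void)leading_ws; (void)joined; (void)max64;
  }

The hunks below apply exactly these substitutions at each call site.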

(cherry picked from commit 0103c36caa2e38e034e0d22185736b9ccfb35c58)

Change-Id: I3dedd8b3f6e92ce8d3eeef99a76ad876c29db304
diff --git a/boot_control_chromeos.cc b/boot_control_chromeos.cc
index fd248ab..547e72b 100644
--- a/boot_control_chromeos.cc
+++ b/boot_control_chromeos.cc
@@ -289,7 +289,7 @@
   // To help compatibility between different we accept both lowercase and
   // uppercase names in the ChromeOS or Brillo standard names.
   // See http://www.chromium.org/chromium-os/chromiumos-design-docs/disk-format
-  string partition_lower = base::StringToLowerASCII(partition_name);
+  string partition_lower = base::ToLowerASCII(partition_name);
   int base_part_num = 2 + 2 * slot;
   if (partition_lower == kChromeOSPartitionNameKernel ||
       partition_lower == kAndroidPartitionNameKernel)
diff --git a/chrome_browser_proxy_resolver.cc b/chrome_browser_proxy_resolver.cc
index 4971d74..da57e1d 100644
--- a/chrome_browser_proxy_resolver.cc
+++ b/chrome_browser_proxy_resolver.cc
@@ -162,13 +162,12 @@
     // Start by finding the first space (if any).
     string::iterator space;
     for (space = token.begin(); space != token.end(); ++space) {
-      if (IsAsciiWhitespace(*space)) {
+      if (base::IsAsciiWhitespace(*space)) {
         break;
       }
     }
 
-    string scheme = string(token.begin(), space);
-    base::StringToLowerASCII(&scheme);
+    string scheme = base::ToLowerASCII(string(token.begin(), space));
     // Chrome uses "socks" to mean socks4 and "proxy" to mean http.
     if (scheme == "socks")
       scheme += "4";
diff --git a/common/action_pipe.h b/common/action_pipe.h
index 362817d..376c2f1 100644
--- a/common/action_pipe.h
+++ b/common/action_pipe.h
@@ -89,10 +89,10 @@
 // Utility function
 template<typename FromAction, typename ToAction>
 void BondActions(FromAction* from, ToAction* to) {
-  // TODO(adlr): find something like this that the compiler accepts:
-  // COMPILE_ASSERT(typeof(typename FromAction::OutputObjectType) ==
-  //                typeof(typename ToAction::InputObjectType),
-  //     FromAction_OutputObjectType_doesnt_match_ToAction_InputObjectType);
+  static_assert(
+      std::is_same<typename FromAction::OutputObjectType,
+                   typename ToAction::InputObjectType>::value,
+      "FromAction::OutputObjectType doesn't match ToAction::InputObjectType");
   ActionPipe<typename FromAction::OutputObjectType>::Bond(from, to);
 }
 
diff --git a/common/libcurl_http_fetcher.cc b/common/libcurl_http_fetcher.cc
index 13784fa..b735703 100644
--- a/common/libcurl_http_fetcher.cc
+++ b/common/libcurl_http_fetcher.cc
@@ -63,21 +63,21 @@
 
 bool LibcurlHttpFetcher::GetProxyType(const string& proxy,
                                       curl_proxytype* out_type) {
-  if (base::StartsWithASCII(proxy, "socks5://", true) ||
-      base::StartsWithASCII(proxy, "socks://", true)) {
+  if (base::StartsWith(proxy, "socks5://", base::CompareCase::SENSITIVE) ||
+      base::StartsWith(proxy, "socks://", base::CompareCase::SENSITIVE)) {
     *out_type = CURLPROXY_SOCKS5_HOSTNAME;
     return true;
   }
-  if (base::StartsWithASCII(proxy, "socks4://", true)) {
+  if (base::StartsWith(proxy, "socks4://", base::CompareCase::SENSITIVE)) {
     *out_type = CURLPROXY_SOCKS4A;
     return true;
   }
-  if (base::StartsWithASCII(proxy, "http://", true) ||
-      base::StartsWithASCII(proxy, "https://", true)) {
+  if (base::StartsWith(proxy, "http://", base::CompareCase::SENSITIVE) ||
+      base::StartsWith(proxy, "https://", base::CompareCase::SENSITIVE)) {
     *out_type = CURLPROXY_HTTP;
     return true;
   }
-  if (base::StartsWithASCII(proxy, kNoProxy, true)) {
+  if (base::StartsWith(proxy, kNoProxy, base::CompareCase::SENSITIVE)) {
     // known failure case. don't log.
     return false;
   }
@@ -193,7 +193,7 @@
   // Lock down the appropriate curl options for HTTP or HTTPS depending on
   // the url.
   if (hardware_->IsOfficialBuild()) {
-    if (base::StartsWithASCII(url_, "http://", false))
+    if (base::StartsWith(url_, "http://", base::CompareCase::INSENSITIVE_ASCII))
       SetCurlOptionsForHttp();
     else
       SetCurlOptionsForHttps();
diff --git a/common/prefs.cc b/common/prefs.cc
index 9d3a30f..a4b97d0 100644
--- a/common/prefs.cc
+++ b/common/prefs.cc
@@ -133,7 +133,7 @@
   TEST_AND_RETURN_FALSE(!key.empty());
   for (size_t i = 0; i < key.size(); ++i) {
     char c = key.at(i);
-    TEST_AND_RETURN_FALSE(IsAsciiAlpha(c) || IsAsciiDigit(c) ||
+    TEST_AND_RETURN_FALSE(base::IsAsciiAlpha(c) || base::IsAsciiDigit(c) ||
                           c == '_' || c == '-');
   }
   *filename = prefs_dir_.Append(key);
diff --git a/common/prefs_unittest.cc b/common/prefs_unittest.cc
index 354b05b..967c411 100644
--- a/common/prefs_unittest.cc
+++ b/common/prefs_unittest.cc
@@ -143,17 +143,19 @@
 }
 
 TEST_F(PrefsTest, GetInt64Max) {
-  ASSERT_TRUE(SetValue(kKey, base::StringPrintf("%" PRIi64, kint64max)));
+  ASSERT_TRUE(SetValue(kKey, base::StringPrintf(
+      "%" PRIi64, std::numeric_limits<int64_t>::max())));
   int64_t value;
   EXPECT_TRUE(prefs_.GetInt64(kKey, &value));
-  EXPECT_EQ(kint64max, value);
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), value);
 }
 
 TEST_F(PrefsTest, GetInt64Min) {
-  ASSERT_TRUE(SetValue(kKey, base::StringPrintf("%" PRIi64, kint64min)));
+  ASSERT_TRUE(SetValue(kKey, base::StringPrintf(
+      "%" PRIi64, std::numeric_limits<int64_t>::min())));
   int64_t value;
   EXPECT_TRUE(prefs_.GetInt64(kKey, &value));
-  EXPECT_EQ(kint64min, value);
+  EXPECT_EQ(std::numeric_limits<int64_t>::min(), value);
 }
 
 TEST_F(PrefsTest, GetInt64Negative) {
@@ -182,17 +184,19 @@
 }
 
 TEST_F(PrefsTest, SetInt64Max) {
-  EXPECT_TRUE(prefs_.SetInt64(kKey, kint64max));
+  EXPECT_TRUE(prefs_.SetInt64(kKey, std::numeric_limits<int64_t>::max()));
   string value;
   EXPECT_TRUE(base::ReadFileToString(prefs_dir_.Append(kKey), &value));
-  EXPECT_EQ(base::StringPrintf("%" PRIi64, kint64max), value);
+  EXPECT_EQ(base::StringPrintf("%" PRIi64, std::numeric_limits<int64_t>::max()),
+            value);
 }
 
 TEST_F(PrefsTest, SetInt64Min) {
-  EXPECT_TRUE(prefs_.SetInt64(kKey, kint64min));
+  EXPECT_TRUE(prefs_.SetInt64(kKey, std::numeric_limits<int64_t>::min()));
   string value;
   EXPECT_TRUE(base::ReadFileToString(prefs_dir_.Append(kKey), &value));
-  EXPECT_EQ(base::StringPrintf("%" PRIi64, kint64min), value);
+  EXPECT_EQ(base::StringPrintf("%" PRIi64, std::numeric_limits<int64_t>::min()),
+            value);
 }
 
 TEST_F(PrefsTest, GetBooleanFalse) {
diff --git a/common/test_utils.cc b/common/test_utils.cc
index f89c448..4d1e20d 100644
--- a/common/test_utils.cc
+++ b/common/test_utils.cc
@@ -142,7 +142,8 @@
   // Bind to an unused loopback device, sanity check the device name.
   lo_dev_name_p->clear();
   if (!(utils::ReadPipe("losetup --show -f " + filename, lo_dev_name_p) &&
-        base::StartsWithASCII(*lo_dev_name_p, "/dev/loop", true))) {
+        base::StartsWith(*lo_dev_name_p, "/dev/loop",
+                         base::CompareCase::SENSITIVE))) {
     ADD_FAILURE();
     return false;
   }
diff --git a/common/utils.cc b/common/utils.cc
index f1a357b..d3b5baa 100644
--- a/common/utils.cc
+++ b/common/utils.cc
@@ -89,8 +89,9 @@
 // Return true if |disk_name| is an MTD or a UBI device. Note that this test is
 // simply based on the name of the device.
 bool IsMtdDeviceName(const string& disk_name) {
-  return base::StartsWithASCII(disk_name, "/dev/ubi", true) ||
-         base::StartsWithASCII(disk_name, "/dev/mtd", true);
+  return base::StartsWith(disk_name, "/dev/ubi",
+                          base::CompareCase::SENSITIVE) ||
+         base::StartsWith(disk_name, "/dev/mtd", base::CompareCase::SENSITIVE);
 }
 
 // Return the device name for the corresponding partition on a NAND device.
@@ -133,8 +134,9 @@
 // base::GetTempDir() and prepends it to |path|. On success stores the full
 // temporary path in |template_path| and returns true.
 bool GetTempName(const string& path, base::FilePath* template_path) {
-  if (path[0] == '/' || base::StartsWithASCII(path, "./", true) ||
-      base::StartsWithASCII(path, "../", true)) {
+  if (path[0] == '/' ||
+      base::StartsWith(path, "./", base::CompareCase::SENSITIVE) ||
+      base::StartsWith(path, "../", base::CompareCase::SENSITIVE)) {
     *template_path = base::FilePath(path);
     return true;
   }
@@ -432,7 +434,8 @@
 bool SplitPartitionName(const string& partition_name,
                         string* out_disk_name,
                         int* out_partition_num) {
-  if (!base::StartsWithASCII(partition_name, "/dev/", true)) {
+  if (!base::StartsWith(partition_name, "/dev/",
+                        base::CompareCase::SENSITIVE)) {
     LOG(ERROR) << "Invalid partition device name: " << partition_name;
     return false;
   }
@@ -486,7 +489,7 @@
     return string();
   }
 
-  if (!base::StartsWithASCII(disk_name, "/dev/", true)) {
+  if (!base::StartsWith(disk_name, "/dev/", base::CompareCase::SENSITIVE)) {
     LOG(ERROR) << "Invalid disk name: " << disk_name;
     return string();
   }
diff --git a/omaha_request_action.cc b/omaha_request_action.cc
index f2cd032..1b0c2fe 100644
--- a/omaha_request_action.cc
+++ b/omaha_request_action.cc
@@ -418,7 +418,8 @@
 
   const string path_suffix = string("/") + element;
 
-  if (!base::EndsWith(data->current_path, path_suffix, true)) {
+  if (!base::EndsWith(data->current_path, path_suffix,
+                      base::CompareCase::SENSITIVE)) {
     LOG(ERROR) << "Unexpected end element '" << element
                << "' with current_path='" << data->current_path << "'";
     data->failed = true;
diff --git a/omaha_request_action_unittest.cc b/omaha_request_action_unittest.cc
index 69585c8..e5cc7e2 100644
--- a/omaha_request_action_unittest.cc
+++ b/omaha_request_action_unittest.cc
@@ -1073,8 +1073,8 @@
   fake_prefs_.SetString(kPrefsOmahaCohort, "evil\nstring");
   fake_prefs_.SetString(kPrefsOmahaCohortHint, "evil&string\\");
   fake_prefs_.SetString(kPrefsOmahaCohortName,
-                        JoinString(vector<string>(100, "My spoon is too big."),
-                                   ' '));
+                        base::JoinString(
+                            vector<string>(100, "My spoon is too big."), " "));
   OmahaResponse response;
   ASSERT_FALSE(
       TestUpdateCheck(&params,
diff --git a/omaha_request_params.cc b/omaha_request_params.cc
index d0011f7..3402451 100644
--- a/omaha_request_params.cc
+++ b/omaha_request_params.cc
@@ -124,12 +124,17 @@
 }
 
 bool OmahaRequestParams::CollectECFWVersions() const {
-  return base::StartsWithASCII(hwid_, string("SAMS ALEX"), true) ||
-         base::StartsWithASCII(hwid_, string("BUTTERFLY"), true) ||
-         base::StartsWithASCII(hwid_, string("LUMPY"), true) ||
-         base::StartsWithASCII(hwid_, string("PARROT"), true) ||
-         base::StartsWithASCII(hwid_, string("SPRING"), true) ||
-         base::StartsWithASCII(hwid_, string("SNOW"), true);
+  return base::StartsWith(hwid_, string("SAMS ALEX"),
+                          base::CompareCase::SENSITIVE) ||
+         base::StartsWith(hwid_, string("BUTTERFLY"),
+                          base::CompareCase::SENSITIVE) ||
+         base::StartsWith(hwid_, string("LUMPY"),
+                          base::CompareCase::SENSITIVE) ||
+         base::StartsWith(hwid_, string("PARROT"),
+                          base::CompareCase::SENSITIVE) ||
+         base::StartsWith(hwid_, string("SPRING"),
+                          base::CompareCase::SENSITIVE) ||
+         base::StartsWith(hwid_, string("SNOW"), base::CompareCase::SENSITIVE);
 }
 
 bool OmahaRequestParams::SetTargetChannel(const string& new_target_channel,
diff --git a/omaha_response.h b/omaha_response.h
index 3a5a889..60ec4ac 100644
--- a/omaha_response.h
+++ b/omaha_response.h
@@ -78,7 +78,7 @@
   // aka "Pacific Time".)
   int install_date_days = -1;
 };
-COMPILE_ASSERT(sizeof(off_t) == 8, off_t_not_64bit);
+static_assert(sizeof(off_t) == 8, "off_t not 64 bit");
 
 }  // namespace chromeos_update_engine
 
diff --git a/omaha_response_handler_action.cc b/omaha_response_handler_action.cc
index 3fa9348..9a449eb 100644
--- a/omaha_response_handler_action.cc
+++ b/omaha_response_handler_action.cc
@@ -176,7 +176,8 @@
 
   // If we're using p2p, |install_plan_.download_url| may contain a
   // HTTP URL even if |response.payload_urls| contain only HTTPS URLs.
-  if (!base::StartsWithASCII(install_plan_.download_url, "https://", false)) {
+  if (!base::StartsWith(install_plan_.download_url, "https://",
+                        base::CompareCase::INSENSITIVE_ASCII)) {
     LOG(INFO) << "Mandating hash checks since download_url is not HTTPS.";
     return true;
   }
@@ -191,7 +192,8 @@
   // on. It's really hard to do book-keeping based on each byte being
   // downloaded to see whether we only used HTTPS throughout.
   for (size_t i = 0; i < response.payload_urls.size(); i++) {
-    if (!base::StartsWithASCII(response.payload_urls[i], "https://", false)) {
+    if (!base::StartsWith(response.payload_urls[i], "https://",
+                          base::CompareCase::INSENSITIVE_ASCII)) {
       LOG(INFO) << "Mandating payload hash checks since Omaha response "
                 << "contains non-HTTPS URL(s)";
       return true;
diff --git a/p2p_manager.cc b/p2p_manager.cc
index 658630c..734918d 100644
--- a/p2p_manager.cc
+++ b/p2p_manager.cc
@@ -339,9 +339,12 @@
   base::FileEnumerator dir(p2p_dir, false, base::FileEnumerator::FILES);
   // Go through all files and collect their mtime.
   for (FilePath name = dir.Next(); !name.empty(); name = dir.Next()) {
-    if (!(base::EndsWith(name.value(), ext_visible, true) ||
-          base::EndsWith(name.value(), ext_non_visible, true)))
+    if (!(base::EndsWith(name.value(), ext_visible,
+                         base::CompareCase::SENSITIVE) ||
+          base::EndsWith(name.value(), ext_non_visible,
+                         base::CompareCase::SENSITIVE))) {
       continue;
+    }
 
     Time time = dir.GetInfo().GetLastModifiedTime();
 
@@ -673,9 +676,12 @@
 
   base::FileEnumerator dir(p2p_dir, false, base::FileEnumerator::FILES);
   for (FilePath name = dir.Next(); !name.empty(); name = dir.Next()) {
-    if (base::EndsWith(name.value(), ext_visible, true) ||
-        base::EndsWith(name.value(), ext_non_visible, true))
+    if (base::EndsWith(name.value(), ext_visible,
+                       base::CompareCase::SENSITIVE) ||
+        base::EndsWith(name.value(), ext_non_visible,
+                       base::CompareCase::SENSITIVE)) {
       num_files += 1;
+    }
   }
 
   return num_files;
diff --git a/payload_consumer/delta_performer.cc b/payload_consumer/delta_performer.cc
index e56bb75..09304e4 100644
--- a/payload_consumer/delta_performer.cc
+++ b/payload_consumer/delta_performer.cc
@@ -171,8 +171,8 @@
                                            const char* message_prefix) {
   // Compute our download and overall progress.
   unsigned new_overall_progress = 0;
-  COMPILE_ASSERT(kProgressDownloadWeight + kProgressOperationsWeight == 100,
-                 progress_weight_dont_add_up);
+  static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
+                "Progress weights don't add up");
   // Only consider download progress if its total size is known; otherwise
   // adjust the operations weight to compensate for the absence of download
   // progress. Also, make sure to cap the download portion at
@@ -409,8 +409,8 @@
     }
 
     // Extract the payload version from the metadata.
-    COMPILE_ASSERT(sizeof(major_payload_version_) == kDeltaVersionSize,
-                   major_payload_version_size_mismatch);
+    static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
+                  "Major payload version size mismatch");
     memcpy(&major_payload_version_,
            &payload[kDeltaVersionOffset],
            kDeltaVersionSize);
@@ -435,8 +435,8 @@
       return kMetadataParseInsufficientData;
 
     // Next, parse the manifest size.
-    COMPILE_ASSERT(sizeof(manifest_size_) == kDeltaManifestSizeSize,
-                   manifest_size_size_mismatch);
+    static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
+                  "manifest_size size mismatch");
     memcpy(&manifest_size_,
            &payload[kDeltaManifestSizeOffset],
            kDeltaManifestSizeSize);
@@ -444,9 +444,9 @@
 
     if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
       // Parse the metadata signature size.
-      COMPILE_ASSERT(sizeof(metadata_signature_size_) ==
-                     kDeltaMetadataSignatureSizeSize,
-                     metadata_signature_size_size_mismatch);
+      static_assert(sizeof(metadata_signature_size_) ==
+                    kDeltaMetadataSignatureSizeSize,
+                    "metadata_signature_size size mismatch");
       uint64_t metadata_signature_size_offset;
       if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
         *error = ErrorCode::kError;
diff --git a/payload_consumer/delta_performer.h b/payload_consumer/delta_performer.h
index 47ecfd8..0a60020 100644
--- a/payload_consumer/delta_performer.h
+++ b/payload_consumer/delta_performer.h
@@ -357,7 +357,7 @@
   uint64_t buffer_offset_{0};
 
   // Last |buffer_offset_| value updated as part of the progress update.
-  uint64_t last_updated_buffer_offset_{kuint64max};
+  uint64_t last_updated_buffer_offset_{std::numeric_limits<uint64_t>::max()};
 
   // The block size (parsed from the manifest).
   uint32_t block_size_{0};
diff --git a/payload_consumer/delta_performer_unittest.cc b/payload_consumer/delta_performer_unittest.cc
index 8192632..fa904f2 100644
--- a/payload_consumer/delta_performer_unittest.cc
+++ b/payload_consumer/delta_performer_unittest.cc
@@ -497,7 +497,7 @@
 
 TEST_F(DeltaPerformerTest, ExtentsToByteStringTest) {
   uint64_t test[] = {1, 1, 4, 2, 0, 1};
-  COMPILE_ASSERT(arraysize(test) % 2 == 0, array_size_uneven);
+  static_assert(arraysize(test) % 2 == 0, "Array size uneven");
   const uint64_t block_size = 4096;
   const uint64_t file_length = 4 * block_size - 13;
 
diff --git a/payload_consumer/download_action.h b/payload_consumer/download_action.h
index 300d97e..4074fdd 100644
--- a/payload_consumer/download_action.h
+++ b/payload_consumer/download_action.h
@@ -168,7 +168,7 @@
 
 // We want to be sure that we're compiled with large file support on linux,
 // just in case we find ourselves downloading large images.
-COMPILE_ASSERT(8 == sizeof(off_t), off_t_not_64_bit);
+static_assert(8 == sizeof(off_t), "off_t not 64 bit");
 
 }  // namespace chromeos_update_engine
 
diff --git a/payload_consumer/extent_writer_unittest.cc b/payload_consumer/extent_writer_unittest.cc
index 24ea5bf..efeab09 100644
--- a/payload_consumer/extent_writer_unittest.cc
+++ b/payload_consumer/extent_writer_unittest.cc
@@ -40,7 +40,7 @@
 
 namespace chromeos_update_engine {
 
-COMPILE_ASSERT(sizeof(off_t) == 8, off_t_not_64_bit);
+static_assert(sizeof(off_t) == 8, "off_t not 64 bit");
 
 namespace {
 const char kPathTemplate[] = "./ExtentWriterTest-file.XXXXXX";
diff --git a/payload_consumer/mtd_file_descriptor.cc b/payload_consumer/mtd_file_descriptor.cc
index da8fd58..3f0a33f 100644
--- a/payload_consumer/mtd_file_descriptor.cc
+++ b/payload_consumer/mtd_file_descriptor.cc
@@ -168,8 +168,8 @@
 bool UbiFileDescriptor::IsUbi(const char* path) {
   base::FilePath device_node(path);
   base::FilePath ubi_name(device_node.BaseName());
-  TEST_AND_RETURN_FALSE(
-      base::StartsWithASCII(ubi_name.MaybeAsASCII(), "ubi", true));
+  TEST_AND_RETURN_FALSE(base::StartsWith(ubi_name.MaybeAsASCII(), "ubi",
+                                         base::CompareCase::SENSITIVE));
 
   return static_cast<bool>(GetUbiVolumeInfo(path));
 }
diff --git a/payload_generator/cycle_breaker.cc b/payload_generator/cycle_breaker.cc
index 321732e..52a6f60 100644
--- a/payload_generator/cycle_breaker.cc
+++ b/payload_generator/cycle_breaker.cc
@@ -108,7 +108,7 @@
   CHECK_GE(stack_.size(),
            static_cast<vector<Vertex::Index>::size_type>(2));
   Edge min_edge = make_pair(stack_[0], stack_[1]);
-  uint64_t min_edge_weight = kuint64max;
+  uint64_t min_edge_weight = std::numeric_limits<uint64_t>::max();
   size_t edges_considered = 0;
   for (vector<Vertex::Index>::const_iterator it = stack_.begin();
        it != (stack_.end() - 1); ++it) {
diff --git a/payload_generator/generate_delta_main.cc b/payload_generator/generate_delta_main.cc
index f462347..abf479b 100644
--- a/payload_generator/generate_delta_main.cc
+++ b/payload_generator/generate_delta_main.cc
@@ -56,9 +56,9 @@
 void ParseSignatureSizes(const string& signature_sizes_flag,
                          vector<int>* signature_sizes) {
   signature_sizes->clear();
-  vector<string> split_strings;
-
-  base::SplitString(signature_sizes_flag, ':', &split_strings);
+  vector<string> split_strings =
+      base::SplitString(signature_sizes_flag, ":", base::TRIM_WHITESPACE,
+                        base::SPLIT_WANT_ALL);
   for (const string& str : split_strings) {
     int size = 0;
     bool parsing_successful = base::StringToInt(str, &size);
@@ -128,8 +128,9 @@
 
 void SignatureFileFlagToBlobs(const string& signature_file_flag,
                               vector<brillo::Blob>* signatures) {
-  vector<string> signature_files;
-  base::SplitString(signature_file_flag, ':', &signature_files);
+  vector<string> signature_files =
+      base::SplitString(signature_file_flag, ":", base::TRIM_WHITESPACE,
+                        base::SPLIT_WANT_ALL);
   for (const string& signature_file : signature_files) {
     brillo::Blob signature;
     CHECK(utils::ReadFile(signature_file, &signature));
@@ -378,7 +379,9 @@
   PayloadGenerationConfig payload_config;
   vector<string> partition_names, old_partitions, new_partitions;
 
-  base::SplitString(FLAGS_partition_names, ':', &partition_names);
+  partition_names =
+      base::SplitString(FLAGS_partition_names, ":", base::TRIM_WHITESPACE,
+                        base::SPLIT_WANT_ALL);
   CHECK(!partition_names.empty());
   if (FLAGS_major_version == kChromeOSMajorPayloadVersion ||
       FLAGS_new_partitions.empty()) {
@@ -395,7 +398,9 @@
     LOG_IF(FATAL, !FLAGS_new_image.empty() || !FLAGS_new_kernel.empty())
         << "--new_image and --new_kernel are deprecated, please use "
         << "--new_partitions for all partitions.";
-    base::SplitString(FLAGS_new_partitions, ':', &new_partitions);
+    new_partitions =
+        base::SplitString(FLAGS_new_partitions, ":", base::TRIM_WHITESPACE,
+                          base::SPLIT_WANT_ALL);
     CHECK(partition_names.size() == new_partitions.size());
 
     payload_config.is_delta = !FLAGS_old_partitions.empty();
@@ -421,7 +426,9 @@
 
   if (payload_config.is_delta) {
     if (!FLAGS_old_partitions.empty()) {
-      base::SplitString(FLAGS_old_partitions, ':', &old_partitions);
+      old_partitions =
+          base::SplitString(FLAGS_old_partitions, ":", base::TRIM_WHITESPACE,
+                            base::SPLIT_WANT_ALL);
       CHECK(old_partitions.size() == new_partitions.size());
     } else {
       old_partitions = {FLAGS_old_image, FLAGS_old_kernel};
diff --git a/payload_generator/graph_types.h b/payload_generator/graph_types.h
index beee4eb..fee8575 100644
--- a/payload_generator/graph_types.h
+++ b/payload_generator/graph_types.h
@@ -83,7 +83,7 @@
 typedef std::pair<Vertex::Index, Vertex::Index> Edge;
 
 const uint64_t kTempBlockStart = 1ULL << 60;
-COMPILE_ASSERT(kTempBlockStart != 0, kTempBlockStart_invalid);
+static_assert(kTempBlockStart != 0, "kTempBlockStart invalid");
 
 }  // namespace chromeos_update_engine
 
diff --git a/payload_state.cc b/payload_state.cc
index 8594f28..d7ccee5 100644
--- a/payload_state.cc
+++ b/payload_state.cc
@@ -513,10 +513,13 @@
     current_download_source_ = kDownloadSourceHttpPeer;
   } else if (GetUrlIndex() < candidate_urls_.size())  {
     string current_url = candidate_urls_[GetUrlIndex()];
-    if (base::StartsWithASCII(current_url, "https://", false))
+    if (base::StartsWith(current_url, "https://",
+                         base::CompareCase::INSENSITIVE_ASCII)) {
       current_download_source_ = kDownloadSourceHttpsServer;
-    else if (base::StartsWithASCII(current_url, "http://", false))
+    } else if (base::StartsWith(current_url, "http://",
+                                base::CompareCase::INSENSITIVE_ASCII)) {
       current_download_source_ = kDownloadSourceHttpServer;
+    }
   }
 
   LOG(INFO) << "Current download source: "
@@ -1162,8 +1165,11 @@
   candidate_urls_.clear();
   for (size_t i = 0; i < response_.payload_urls.size(); i++) {
     string candidate_url = response_.payload_urls[i];
-    if (base::StartsWithASCII(candidate_url, "http://", false) && !http_url_ok)
+    if (base::StartsWith(candidate_url, "http://",
+                         base::CompareCase::INSENSITIVE_ASCII) &&
+        !http_url_ok) {
       continue;
+    }
     candidate_urls_.push_back(candidate_url);
     LOG(INFO) << "Candidate Url" << (candidate_urls_.size() - 1)
               << ": " << candidate_url;
diff --git a/test_http_server.cc b/test_http_server.cc
index eaffba8..98e7a6d 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -91,7 +91,7 @@
       exit(RC_ERR_READ);
     }
     headers.append(buf, r);
-  } while (!base::EndsWith(headers, EOL EOL, true));
+  } while (!base::EndsWith(headers, EOL EOL, base::CompareCase::SENSITIVE));
 
   LOG(INFO) << "got headers:\n--8<------8<------8<------8<----\n"
             << headers
@@ -103,8 +103,9 @@
       headers.substr(0, headers.length() - strlen(EOL EOL)), EOL, &lines);
 
   // Decode URL line.
-  vector<string> terms;
-  base::SplitStringAlongWhitespace(lines[0], &terms);
+  vector<string> terms = base::SplitString(lines[0], base::kWhitespaceASCII,
+                                           base::KEEP_WHITESPACE,
+                                           base::SPLIT_WANT_NONEMPTY);
   CHECK_EQ(terms.size(), static_cast<vector<string>::size_type>(3));
   CHECK_EQ(terms[0], "GET");
   request->url = terms[1];
@@ -113,14 +114,14 @@
   // Decode remaining lines.
   size_t i;
   for (i = 1; i < lines.size(); i++) {
-    vector<string> terms;
-    base::SplitStringAlongWhitespace(lines[i], &terms);
+    terms = base::SplitString(lines[i], base::kWhitespaceASCII,
+                              base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
 
     if (terms[0] == "Range:") {
       CHECK_EQ(terms.size(), static_cast<vector<string>::size_type>(2));
       string &range = terms[1];
       LOG(INFO) << "range attribute: " << range;
-      CHECK(base::StartsWithASCII(range, "bytes=", true) &&
+      CHECK(base::StartsWith(range, "bytes=", base::CompareCase::SENSITIVE) &&
             range.find('-') != string::npos);
       request->start_offset = atoll(range.c_str() + strlen("bytes="));
       // Decode end offset and increment it by one (so it is non-inclusive).
@@ -478,8 +479,9 @@
     CHECK_GT(url.size(), static_cast<size_t>(0));
     CHECK_EQ(url[0], '/');
 
-    // Split it into terms delimited by slashes, omitting the preceeding slash.
-    base::SplitStringDontTrim(url.substr(1), '/', &terms);
+    // Split it into terms delimited by slashes, omitting the preceding slash.
+    terms = base::SplitString(url.substr(1), "/", base::KEEP_WHITESPACE,
+                              base::SPLIT_WANT_ALL);
 
     // Ensure expected length.
     CHECK_EQ(terms.size(), num_terms);
@@ -510,10 +512,11 @@
   LOG(INFO) << "pid(" << getpid() <<  "): handling url " << url;
   if (url == "/quitquitquit") {
     HandleQuit(fd);
-  } else if (base::StartsWithASCII(url, "/download/", true)) {
+  } else if (base::StartsWith(url, "/download/", 
+                              base::CompareCase::SENSITIVE)) {
     const UrlTerms terms(url, 2);
     HandleGet(fd, request, terms.GetSizeT(1));
-  } else if (base::StartsWithASCII(url, "/flaky/", true)) {
+  } else if (base::StartsWith(url, "/flaky/", base::CompareCase::SENSITIVE)) {
     const UrlTerms terms(url, 5);
     HandleGet(fd, request, terms.GetSizeT(1), terms.GetSizeT(2),
               terms.GetInt(3), terms.GetInt(4));
@@ -521,7 +524,8 @@
     HandleRedirect(fd, request);
   } else if (url == "/error") {
     HandleError(fd, request);
-  } else if (base::StartsWithASCII(url, "/error-if-offset/", true)) {
+  } else if (base::StartsWith(url, "/error-if-offset/",
+                              base::CompareCase::SENSITIVE)) {
     const UrlTerms terms(url, 3);
     HandleErrorIfOffset(fd, request, terms.GetSizeT(1), terms.GetInt(2));
   } else if (url == "/hang") {
diff --git a/update_attempter.cc b/update_attempter.cc
index da135e4..9b68c44 100644
--- a/update_attempter.cc
+++ b/update_attempter.cc
@@ -851,7 +851,7 @@
   command.push_back("/sbin/shutdown");
   command.push_back("-r");
   command.push_back("now");
-  LOG(INFO) << "Running \"" << JoinString(command, ' ') << "\"";
+  LOG(INFO) << "Running \"" << base::JoinString(command, " ") << "\"";
   int rc = 0;
   Subprocess::SynchronousExec(command, &rc, nullptr);
   return rc == 0;
diff --git a/update_manager/chromeos_policy.cc b/update_manager/chromeos_policy.cc
index 6ba6f82..e3d99d0 100644
--- a/update_manager/chromeos_policy.cc
+++ b/update_manager/chromeos_policy.cc
@@ -156,7 +156,9 @@
 
 // Checks whether |url| can be used under given download restrictions.
 bool IsUrlUsable(const string& url, bool http_allowed) {
-  return http_allowed || !base::StartsWithASCII(url, "http://", false);
+  return http_allowed ||
+         !base::StartsWith(url, "http://",
+                           base::CompareCase::INSENSITIVE_ASCII);
 }
 
 }  // namespace