Initial check in

Bug: 137197907
diff --git a/src/llvm-project/llvm/unittests/Analysis/AliasAnalysisTest.cpp b/src/llvm-project/llvm/unittests/Analysis/AliasAnalysisTest.cpp
new file mode 100644
index 0000000..42a4210
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/AliasAnalysisTest.cpp
@@ -0,0 +1,253 @@
+//===--- AliasAnalysisTest.cpp - Alias analysis infra unit tests ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+// Set up some test passes.
+namespace llvm {
+void initializeAATestPassPass(PassRegistry&);
+void initializeTestCustomAAWrapperPassPass(PassRegistry&);
+}
+
+namespace {
+struct AATestPass : FunctionPass {
+  static char ID;
+  AATestPass() : FunctionPass(ID) {
+    initializeAATestPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.setPreservesAll();
+  }
+
+  bool runOnFunction(Function &F) override {
+    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
+
+    SetVector<Value *> Pointers;
+    for (Argument &A : F.args())
+      if (A.getType()->isPointerTy())
+        Pointers.insert(&A);
+    for (Instruction &I : instructions(F))
+      if (I.getType()->isPointerTy())
+        Pointers.insert(&I);
+
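+    // Query every ordered pair of pointers; the results are deliberately
+    // discarded because this pass only exists to exercise whatever AA stack
+    // has been wired up for it.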
+    for (Value *P1 : Pointers)
+      for (Value *P2 : Pointers)
+        (void)AA.alias(P1, LocationSize::unknown(), P2,
+                       LocationSize::unknown());
+
+    return false;
+  }
+};
+}
+
+char AATestPass::ID = 0;
+INITIALIZE_PASS_BEGIN(AATestPass, "aa-test-pass", "Alias Analysis Test Pass",
+                      false, true)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_END(AATestPass, "aa-test-pass", "Alias Analysis Test Pass",
+                    false, true)
+
+namespace {
+/// A test customizable AA result. It merely accepts a callback to run whenever
+/// it receives an alias query. Useful for testing that a particular AA result
+/// is reached.
+struct TestCustomAAResult : AAResultBase<TestCustomAAResult> {
+  friend AAResultBase<TestCustomAAResult>;
+
+  std::function<void()> CB;
+
+  explicit TestCustomAAResult(std::function<void()> CB)
+      : AAResultBase(), CB(std::move(CB)) {}
+  TestCustomAAResult(TestCustomAAResult &&Arg)
+      : AAResultBase(std::move(Arg)), CB(std::move(Arg.CB)) {}
+
+  bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+
+  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+    CB();
+    return MayAlias;
+  }
+};
+}
+
+namespace {
+/// A wrapper pass for the legacy pass manager to use with the above custom AA
+/// result.
+class TestCustomAAWrapperPass : public ImmutablePass {
+  std::function<void()> CB;
+  std::unique_ptr<TestCustomAAResult> Result;
+
+public:
+  static char ID;
+
+  explicit TestCustomAAWrapperPass(
+      std::function<void()> CB = std::function<void()>())
+      : ImmutablePass(ID), CB(std::move(CB)) {
+    initializeTestCustomAAWrapperPassPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<TargetLibraryInfoWrapperPass>();
+  }
+
+  bool doInitialization(Module &M) override {
+    Result.reset(new TestCustomAAResult(std::move(CB)));
+    return true;
+  }
+
+  bool doFinalization(Module &M) override {
+    Result.reset();
+    return true;
+  }
+
+  TestCustomAAResult &getResult() { return *Result; }
+  const TestCustomAAResult &getResult() const { return *Result; }
+};
+}
+
+char TestCustomAAWrapperPass::ID = 0;
+INITIALIZE_PASS_BEGIN(TestCustomAAWrapperPass, "test-custom-aa",
+                "Test Custom AA Wrapper Pass", false, true)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_END(TestCustomAAWrapperPass, "test-custom-aa",
+                "Test Custom AA Wrapper Pass", false, true)
+
+namespace {
+
+class AliasAnalysisTest : public testing::Test {
+protected:
+  LLVMContext C;
+  Module M;
+  TargetLibraryInfoImpl TLII;
+  TargetLibraryInfo TLI;
+  std::unique_ptr<AssumptionCache> AC;
+  std::unique_ptr<BasicAAResult> BAR;
+  std::unique_ptr<AAResults> AAR;
+
+  AliasAnalysisTest() : M("AliasAnalysisTest", C), TLI(TLII) {}
+
+  AAResults &getAAResults(Function &F) {
+    // Reset the Function AA results first to clear out any references.
+    AAR.reset(new AAResults(TLI));
+
+    // Build the various AA results and register them.
+    AC.reset(new AssumptionCache(F));
+    BAR.reset(new BasicAAResult(M.getDataLayout(), F, TLI, *AC));
+    AAR->addAAResult(*BAR);
+
+    return *AAR;
+  }
+};
+
+TEST_F(AliasAnalysisTest, getModRefInfo) {
+  // Setup function.
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(C), std::vector<Type *>(), false);
+  auto *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+  auto *BB = BasicBlock::Create(C, "entry", F);
+  auto IntType = Type::getInt32Ty(C);
+  auto PtrType = Type::getInt32PtrTy(C);
+  auto *Value = ConstantInt::get(IntType, 42);
+  auto *Addr = ConstantPointerNull::get(PtrType);
+
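+  // Create one instruction of each interesting kind so that its mod/ref
+  // behavior against an unknown memory location can be queried below.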
+  auto *Store1 = new StoreInst(Value, Addr, BB);
+  auto *Load1 = new LoadInst(Addr, "load", BB);
+  auto *Add1 = BinaryOperator::CreateAdd(Value, Value, "add", BB);
+  auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
+  auto *CmpXChg1 = new AtomicCmpXchgInst(
+      Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
+      AtomicOrdering::Monotonic, AtomicOrdering::Monotonic,
+      SyncScope::System, BB);
+  auto *AtomicRMW =
+      new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1),
+                        AtomicOrdering::Monotonic, SyncScope::System, BB);
+
+  ReturnInst::Create(C, nullptr, BB);
+
+  auto &AA = getAAResults(*F);
+
+  // Check the basic mod/ref results for each instruction kind.
+  EXPECT_EQ(AA.getModRefInfo(Store1, MemoryLocation()), ModRefInfo::Mod);
+  EXPECT_EQ(AA.getModRefInfo(Store1, None), ModRefInfo::Mod);
+  EXPECT_EQ(AA.getModRefInfo(Load1, MemoryLocation()), ModRefInfo::Ref);
+  EXPECT_EQ(AA.getModRefInfo(Load1, None), ModRefInfo::Ref);
+  EXPECT_EQ(AA.getModRefInfo(Add1, MemoryLocation()), ModRefInfo::NoModRef);
+  EXPECT_EQ(AA.getModRefInfo(Add1, None), ModRefInfo::NoModRef);
+  EXPECT_EQ(AA.getModRefInfo(VAArg1, MemoryLocation()), ModRefInfo::ModRef);
+  EXPECT_EQ(AA.getModRefInfo(VAArg1, None), ModRefInfo::ModRef);
+  EXPECT_EQ(AA.getModRefInfo(CmpXChg1, MemoryLocation()), ModRefInfo::ModRef);
+  EXPECT_EQ(AA.getModRefInfo(CmpXChg1, None), ModRefInfo::ModRef);
+  EXPECT_EQ(AA.getModRefInfo(AtomicRMW, MemoryLocation()), ModRefInfo::ModRef);
+  EXPECT_EQ(AA.getModRefInfo(AtomicRMW, None), ModRefInfo::ModRef);
+}
+
+class AAPassInfraTest : public testing::Test {
+protected:
+  LLVMContext C;
+  SMDiagnostic Err;
+  std::unique_ptr<Module> M;
+
+public:
+  AAPassInfraTest()
+      : M(parseAssemblyString("define i32 @f(i32* %x, i32* %y) {\n"
+                              "entry:\n"
+                              "  %lx = load i32, i32* %x\n"
+                              "  %ly = load i32, i32* %y\n"
+                              "  %sum = add i32 %lx, %ly\n"
+                              "  ret i32 %sum\n"
+                              "}\n",
+                              Err, C)) {
+    assert(M && "Failed to build the module!");
+  }
+};
+
+TEST_F(AAPassInfraTest, injectExternalAA) {
+  legacy::PassManager PM;
+
+  // Register our custom AA's wrapper pass manually.
+  bool IsCustomAAQueried = false;
+  PM.add(new TestCustomAAWrapperPass([&] { IsCustomAAQueried = true; }));
+
+  // Now add the external AA wrapper with a lambda which queries for the
+  // wrapper around our custom AA and adds it to the results.
+  PM.add(createExternalAAWrapperPass([](Pass &P, Function &, AAResults &AAR) {
+    if (auto *WrapperPass = P.getAnalysisIfAvailable<TestCustomAAWrapperPass>())
+      AAR.addAAResult(WrapperPass->getResult());
+  }));
+
+  // And run a pass that will make some alias queries. This will automatically
+  // trigger the rest of the alias analysis stack to be run. It is analogous to
+  // building a full pass pipeline with any of the existing pass manager
+  // builders.
+  PM.add(new AATestPass());
+  PM.run(*M);
+
+  // Finally, ensure that our custom AA was indeed queried.
+  EXPECT_TRUE(IsCustomAAQueried);
+}
+
+} // end anonymous namespace
diff --git a/src/llvm-project/llvm/unittests/Analysis/AliasSetTrackerTest.cpp b/src/llvm-project/llvm/unittests/Analysis/AliasSetTrackerTest.cpp
new file mode 100644
index 0000000..57d21e2
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/AliasSetTrackerTest.cpp
@@ -0,0 +1,89 @@
+//===- AliasSetTrackerTest.cpp - Unit tests for the Alias Set Tracker ----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+TEST(AliasSetTracker, AliasUnknownInst) {
+  StringRef Assembly = R"(
+    @a = common global i32 0, align 4
+    @b = common global float 0.000000e+00, align 4
+
+    ; Function Attrs: nounwind ssp uwtable
+    define i32 @read_a() #0 {
+      %1 = load i32, i32* @a, align 4, !tbaa !3
+      ret i32 %1
+    }
+
+    ; Function Attrs: nounwind ssp uwtable
+    define void @write_b() #0 {
+      store float 1.000000e+01, float* @b, align 4, !tbaa !7
+      ret void
+    }
+
+    ; Function Attrs: nounwind ssp uwtable
+    define void @test() #0 {
+      %1 = call i32 @read_a(), !tbaa !3
+      call void @write_b(), !tbaa !7
+      ret void
+    }
+
+    !3 = !{!4, !4, i64 0}
+    !4 = !{!"int", !5, i64 0}
+    !5 = !{!"omnipotent char", !6, i64 0}
+    !6 = !{!"Simple C/C++ TBAA"}
+    !7 = !{!8, !8, i64 0}
+    !8 = !{!"float", !5, i64 0}
+  )";
+
+  // Parse the IR. The two calls in @test can not access aliasing elements.
+  LLVMContext Context;
+  SMDiagnostic Error;
+  auto M = parseAssemblyString(Assembly, Error, Context);
+  ASSERT_TRUE(M) << "Bad assembly?";
+
+  // Initialize the alias result.
+  Triple Trip(M->getTargetTriple());
+  TargetLibraryInfoImpl TLII(Trip);
+  TargetLibraryInfo TLI(TLII);
+  AAResults AA(TLI);
+  TypeBasedAAResult TBAAR;
+  AA.addAAResult(TBAAR);
+
+  // Initialize the alias set tracker for the @test function.
+  Function *Test = M->getFunction("test");
+  ASSERT_NE(Test, nullptr);
+  AliasSetTracker AST(AA);
+  for (auto &BB : *Test)
+    AST.add(BB);
+  // There should be two disjoint alias sets, one from each call.
+  ASSERT_EQ((int)AST.getAliasSets().size(), 2);
+
+  // Directly test aliasesUnknownInst: each call instruction should alias only
+  // one of the alias sets.
+  for (auto &Inst : *Test->begin()) {
+    bool FoundAS = false;
+    for (AliasSet &AS : AST) {
+      if (!Inst.mayReadOrWriteMemory())
+        continue;
+      if (!AS.aliasesUnknownInst(&Inst, AA))
+        continue;
+      ASSERT_FALSE(FoundAS);
+      FoundAS = true;
+    }
+  }
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/BasicAliasAnalysisTest.cpp b/src/llvm-project/llvm/unittests/Analysis/BasicAliasAnalysisTest.cpp
new file mode 100644
index 0000000..fc52aff
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/BasicAliasAnalysisTest.cpp
@@ -0,0 +1,124 @@
+//===- BasicAliasAnalysisTest.cpp - Unit tests for BasicAA ----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Targeted tests that are hard/convoluted to make happen with just `opt`.
+//
+
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+// FIXME: This is duplicated between this file and MemorySSATest. Refactor.
+const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
+
+/// There's a lot of common setup between these tests. This fixture helps reduce
+/// that. Tests should mock up a function, store it in F, and then call
+/// setupAnalyses().
+class BasicAATest : public testing::Test {
+protected:
+  // N.B. Many of these members depend on each other (e.g. the Module depends on
+  // the Context, etc.). So, order matters here (and in TestAnalyses).
+  LLVMContext C;
+  Module M;
+  IRBuilder<> B;
+  DataLayout DL;
+  TargetLibraryInfoImpl TLII;
+  TargetLibraryInfo TLI;
+  Function *F;
+
+  // Things that we need to build after the function is created.
+  struct TestAnalyses {
+    DominatorTree DT;
+    AssumptionCache AC;
+    BasicAAResult BAA;
+
+    TestAnalyses(BasicAATest &Test)
+        : DT(*Test.F), AC(*Test.F), BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {}
+  };
+
+  llvm::Optional<TestAnalyses> Analyses;
+
+  BasicAAResult &setupAnalyses() {
+    assert(F);
+    Analyses.emplace(*this);
+    return Analyses->BAA;
+  }
+
+public:
+  BasicAATest()
+      : M("BasicAATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
+};
+
+// Check that a function arg can't trivially alias a global when we're accessing
+// >sizeof(global) bytes through that arg, unless the access size is just an
+// upper-bound.
+TEST_F(BasicAATest, AliasInstWithObjectOfImpreciseSize) {
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt32Ty()->getPointerTo()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+
+  Value *IncomingI32Ptr = F->arg_begin();
+
+  auto *GlobalPtr =
+      cast<GlobalVariable>(M.getOrInsertGlobal("some_global", B.getInt8Ty()));
+
+  // Without sufficiently restricted linkage/an init, some of the object size
+  // checking bits get more conservative.
+  GlobalPtr->setLinkage(GlobalValue::LinkageTypes::InternalLinkage);
+  GlobalPtr->setInitializer(B.getInt8(0));
+
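+  // A precise 4-byte access through the argument cannot fit inside the 1-byte
+  // global, so the locations cannot alias; once the 4 bytes are only an upper
+  // bound the access may be smaller and the result is MayAlias.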
+  BasicAAResult &BasicAA = setupAnalyses();
+  ASSERT_EQ(
+      BasicAA.alias(MemoryLocation(IncomingI32Ptr, LocationSize::precise(4)),
+                    MemoryLocation(GlobalPtr, LocationSize::precise(1))),
+      AliasResult::NoAlias);
+
+  ASSERT_EQ(
+      BasicAA.alias(MemoryLocation(IncomingI32Ptr, LocationSize::upperBound(4)),
+                    MemoryLocation(GlobalPtr, LocationSize::precise(1))),
+      AliasResult::MayAlias);
+}
+
+// Check that we fall back to MayAlias if we see an access of an entire object
+// that's just an upper-bound.
+TEST_F(BasicAATest, AliasInstWithFullObjectOfImpreciseSize) {
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt64Ty()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+
+  Value *ArbitraryI32 = F->arg_begin();
+  AllocaInst *I8 = B.CreateAlloca(B.getInt8Ty(), B.getInt32(2));
+  auto *I8AtUncertainOffset =
+      cast<GetElementPtrInst>(B.CreateGEP(I8, ArbitraryI32));
+
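+  // A precise access covering the whole 2-byte alloca partially aliases the
+  // 1-byte access at an unknown offset, but if the 2 bytes are only an upper
+  // bound we have to fall back to MayAlias.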
+  BasicAAResult &BasicAA = setupAnalyses();
+  ASSERT_EQ(BasicAA.alias(
+                MemoryLocation(I8, LocationSize::precise(2)),
+                MemoryLocation(I8AtUncertainOffset, LocationSize::precise(1))),
+            AliasResult::PartialAlias);
+
+  ASSERT_EQ(BasicAA.alias(
+                MemoryLocation(I8, LocationSize::upperBound(2)),
+                MemoryLocation(I8AtUncertainOffset, LocationSize::precise(1))),
+            AliasResult::MayAlias);
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/BlockFrequencyInfoTest.cpp b/src/llvm-project/llvm/unittests/Analysis/BlockFrequencyInfoTest.cpp
new file mode 100644
index 0000000..cc05306
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/BlockFrequencyInfoTest.cpp
@@ -0,0 +1,95 @@
+//===- BlockFrequencyInfoTest.cpp - BlockFrequencyInfo unit tests ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+class BlockFrequencyInfoTest : public testing::Test {
+protected:
+  std::unique_ptr<BranchProbabilityInfo> BPI;
+  std::unique_ptr<DominatorTree> DT;
+  std::unique_ptr<LoopInfo> LI;
+  LLVMContext C;
+
+  BlockFrequencyInfo buildBFI(Function &F) {
+    DT.reset(new DominatorTree(F));
+    LI.reset(new LoopInfo(*DT));
+    BPI.reset(new BranchProbabilityInfo(F, *LI));
+    return BlockFrequencyInfo(F, *BPI, *LI);
+  }
+  std::unique_ptr<Module> makeLLVMModule() {
+    const char *ModuleString = "define i32 @f(i32 %x) {\n"
+                              "bb0:\n"
+                              "  %y1 = icmp eq i32 %x, 0 \n"
+                              "  br i1 %y1, label %bb1, label %bb2 \n"
+                              "bb1:\n"
+                              "  br label %bb3\n"
+                              "bb2:\n"
+                              "  br label %bb3\n"
+                              "bb3:\n"
+                              "  %y2 = phi i32 [0, %bb1], [1, %bb2] \n"
+                              "  ret i32 %y2\n"
+                              "}\n";
+    SMDiagnostic Err;
+    return parseAssemblyString(ModuleString, Err, C);
+  }
+};
+
+TEST_F(BlockFrequencyInfoTest, Basic) {
+  auto M = makeLLVMModule();
+  Function *F = M->getFunction("f");
+  F->setEntryCount(100);
+
+  BlockFrequencyInfo BFI = buildBFI(*F);
+  BasicBlock &BB0 = F->getEntryBlock();
+  BasicBlock *BB1 = BB0.getTerminator()->getSuccessor(0);
+  BasicBlock *BB2 = BB0.getTerminator()->getSuccessor(1);
+  BasicBlock *BB3 = BB1->getSingleSuccessor();
+
+  uint64_t BB0Freq = BFI.getBlockFreq(&BB0).getFrequency();
+  uint64_t BB1Freq = BFI.getBlockFreq(BB1).getFrequency();
+  uint64_t BB2Freq = BFI.getBlockFreq(BB2).getFrequency();
+  uint64_t BB3Freq = BFI.getBlockFreq(BB3).getFrequency();
+
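+  // bb1 and bb2 partition the flow out of bb0 and rejoin in bb3, so the entry
+  // frequency equals bb3's frequency and the two arms sum to it.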
+  EXPECT_EQ(BB0Freq, BB3Freq);
+  EXPECT_EQ(BB0Freq, BB1Freq + BB2Freq);
+  EXPECT_EQ(BB0Freq, BB3Freq);
+
+  EXPECT_EQ(BFI.getBlockProfileCount(&BB0).getValue(), UINT64_C(100));
+  EXPECT_EQ(BFI.getBlockProfileCount(BB3).getValue(), UINT64_C(100));
+  EXPECT_EQ(BFI.getBlockProfileCount(BB1).getValue(),
+            (100 * BB1Freq + BB0Freq / 2) / BB0Freq);
+  EXPECT_EQ(BFI.getBlockProfileCount(BB2).getValue(),
+            (100 * BB2Freq + BB0Freq / 2) / BB0Freq);
+
+  // Scale the frequencies of BB0, BB1 and BB2 by a factor of two.
+  SmallPtrSet<BasicBlock *, 4> BlocksToScale({BB1, BB2});
+  BFI.setBlockFreqAndScale(&BB0, BB0Freq * 2, BlocksToScale);
+  EXPECT_EQ(BFI.getBlockFreq(&BB0).getFrequency(), 2 * BB0Freq);
+  EXPECT_EQ(BFI.getBlockFreq(BB1).getFrequency(), 2 * BB1Freq);
+  EXPECT_EQ(BFI.getBlockFreq(BB2).getFrequency(), 2 * BB2Freq);
+  EXPECT_EQ(BFI.getBlockFreq(BB3).getFrequency(), BB3Freq);
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/src/llvm-project/llvm/unittests/Analysis/BranchProbabilityInfoTest.cpp b/src/llvm-project/llvm/unittests/Analysis/BranchProbabilityInfoTest.cpp
new file mode 100644
index 0000000..529af5c
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/BranchProbabilityInfoTest.cpp
@@ -0,0 +1,88 @@
+//===- BranchProbabilityInfoTest.cpp - BranchProbabilityInfo unit tests ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+struct BranchProbabilityInfoTest : public testing::Test {
+  std::unique_ptr<BranchProbabilityInfo> BPI;
+  std::unique_ptr<DominatorTree> DT;
+  std::unique_ptr<LoopInfo> LI;
+  LLVMContext C;
+
+  BranchProbabilityInfo &buildBPI(Function &F) {
+    DT.reset(new DominatorTree(F));
+    LI.reset(new LoopInfo(*DT));
+    BPI.reset(new BranchProbabilityInfo(F, *LI));
+    return *BPI;
+  }
+
+  std::unique_ptr<Module> makeLLVMModule() {
+    const char *ModuleString = "define void @f() { exit: ret void }\n";
+    SMDiagnostic Err;
+    return parseAssemblyString(ModuleString, Err, C);
+  }
+};
+
+TEST_F(BranchProbabilityInfoTest, StressUnreachableHeuristic) {
+  auto M = makeLLVMModule();
+  Function *F = M->getFunction("f");
+
+  // define void @f() {
+  // entry:
+  //   switch i32 undef, label %exit, [
+  //      i32 0, label %preexit
+  //      ...                   ;;< Add lots of cases to stress the heuristic.
+  //   ]
+  // preexit:
+  //   unreachable
+  // exit:
+  //   ret void
+  // }
+
+  auto *ExitBB = &F->back();
+  auto *EntryBB = BasicBlock::Create(C, "entry", F, /*insertBefore=*/ExitBB);
+
+  auto *PreExitBB =
+      BasicBlock::Create(C, "preexit", F, /*insertBefore=*/ExitBB);
+  new UnreachableInst(C, PreExitBB);
+
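+  // Funnel a large number of switch cases into the unreachable block so the
+  // unreachable heuristic has many edges to weigh against the single default
+  // edge to %exit.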
+  unsigned NumCases = 4096;
+  auto *I32 = IntegerType::get(C, 32);
+  auto *Undef = UndefValue::get(I32);
+  auto *Switch = SwitchInst::Create(Undef, ExitBB, NumCases, EntryBB);
+  for (unsigned I = 0; I < NumCases; ++I)
+    Switch->addCase(ConstantInt::get(I32, I), PreExitBB);
+
+  BranchProbabilityInfo &BPI = buildBPI(*F);
+
+  // FIXME: This doesn't seem optimal. Since all of the cases handled by the
+  // switch have the *same* destination block ("preexit"), shouldn't it be the
+  // hot one? I'd expect the results to be reversed here...
+  EXPECT_FALSE(BPI.isEdgeHot(EntryBB, PreExitBB));
+  EXPECT_TRUE(BPI.isEdgeHot(EntryBB, ExitBB));
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/src/llvm-project/llvm/unittests/Analysis/CFGTest.cpp b/src/llvm-project/llvm/unittests/Analysis/CFGTest.cpp
new file mode 100644
index 0000000..d7f14c3
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/CFGTest.cpp
@@ -0,0 +1,388 @@
+//===- CFGTest.cpp - CFG tests --------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+
+// This fixture assists in running the isPotentiallyReachable utility four ways
+// and ensuring it produces the correct answer each time.
+class IsPotentiallyReachableTest : public testing::Test {
+protected:
+  void ParseAssembly(const char *Assembly) {
+    SMDiagnostic Error;
+    M = parseAssemblyString(Assembly, Error, Context);
+
+    std::string errMsg;
+    raw_string_ostream os(errMsg);
+    Error.print("", os);
+
+    // A failure here means that the test itself is buggy.
+    if (!M)
+      report_fatal_error(os.str().c_str());
+
+    Function *F = M->getFunction("test");
+    if (F == nullptr)
+      report_fatal_error("Test must have a function named @test");
+
+    A = B = nullptr;
+    for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
+      if (I->hasName()) {
+        if (I->getName() == "A")
+          A = &*I;
+        else if (I->getName() == "B")
+          B = &*I;
+      }
+    }
+    if (A == nullptr)
+      report_fatal_error("@test must have an instruction %A");
+    if (B == nullptr)
+      report_fatal_error("@test must have an instruction %B");
+  }
+
+  void ExpectPath(bool ExpectedResult) {
+    static char ID;
+    class IsPotentiallyReachableTestPass : public FunctionPass {
+     public:
+      IsPotentiallyReachableTestPass(bool ExpectedResult,
+                                     Instruction *A, Instruction *B)
+          : FunctionPass(ID), ExpectedResult(ExpectedResult), A(A), B(B) {}
+
+      static int initialize() {
+        PassInfo *PI = new PassInfo("isPotentiallyReachable testing pass",
+                                    "", &ID, nullptr, true, true);
+        PassRegistry::getPassRegistry()->registerPass(*PI, false);
+        initializeLoopInfoWrapperPassPass(*PassRegistry::getPassRegistry());
+        initializeDominatorTreeWrapperPassPass(
+            *PassRegistry::getPassRegistry());
+        return 0;
+      }
+
+      void getAnalysisUsage(AnalysisUsage &AU) const override {
+        AU.setPreservesAll();
+        AU.addRequired<LoopInfoWrapperPass>();
+        AU.addRequired<DominatorTreeWrapperPass>();
+      }
+
+      bool runOnFunction(Function &F) override {
+        if (!F.hasName() || F.getName() != "test")
+          return false;
+
+        LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+        DominatorTree *DT =
+            &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+        EXPECT_EQ(isPotentiallyReachable(A, B, nullptr, nullptr),
+                  ExpectedResult);
+        EXPECT_EQ(isPotentiallyReachable(A, B, DT, nullptr), ExpectedResult);
+        EXPECT_EQ(isPotentiallyReachable(A, B, nullptr, LI), ExpectedResult);
+        EXPECT_EQ(isPotentiallyReachable(A, B, DT, LI), ExpectedResult);
+        return false;
+      }
+      bool ExpectedResult;
+      Instruction *A, *B;
+    };
+
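+    // A function-local static ensures the pass and its required analyses are
+    // registered exactly once, the first time ExpectPath is called.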
+    static int initialize = IsPotentiallyReachableTestPass::initialize();
+    (void)initialize;
+
+    IsPotentiallyReachableTestPass *P =
+        new IsPotentiallyReachableTestPass(ExpectedResult, A, B);
+    legacy::PassManager PM;
+    PM.add(P);
+    PM.run(*M);
+  }
+
+  LLVMContext Context;
+  std::unique_ptr<Module> M;
+  Instruction *A, *B;
+};
+
+}
+
+TEST_F(IsPotentiallyReachableTest, SameBlockNoPath) {
+  ParseAssembly(
+      "define void @test() {\n"
+      "entry:\n"
+      "  bitcast i8 undef to i8\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  bitcast i8 undef to i8\n"
+      "  bitcast i8 undef to i8\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  ret void\n"
+      "}\n");
+  ExpectPath(false);
+}
+
+TEST_F(IsPotentiallyReachableTest, SameBlockPath) {
+  ParseAssembly(
+      "define void @test() {\n"
+      "entry:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  bitcast i8 undef to i8\n"
+      "  bitcast i8 undef to i8\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  ret void\n"
+      "}\n");
+  ExpectPath(true);
+}
+
+TEST_F(IsPotentiallyReachableTest, SameBlockNoLoop) {
+  ParseAssembly(
+      "define void @test() {\n"
+      "entry:\n"
+      "  br label %middle\n"
+      "middle:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  bitcast i8 undef to i8\n"
+      "  bitcast i8 undef to i8\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  br label %nextblock\n"
+      "nextblock:\n"
+      "  ret void\n"
+      "}\n");
+  ExpectPath(false);
+}
+
+TEST_F(IsPotentiallyReachableTest, StraightNoPath) {
+  ParseAssembly(
+      "define void @test() {\n"
+      "entry:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  br label %exit\n"
+      "exit:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  ret void\n"
+      "}");
+  ExpectPath(false);
+}
+
+TEST_F(IsPotentiallyReachableTest, StraightPath) {
+  ParseAssembly(
+      "define void @test() {\n"
+      "entry:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  br label %exit\n"
+      "exit:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  ret void\n"
+      "}");
+  ExpectPath(true);
+}
+
+TEST_F(IsPotentiallyReachableTest, DestUnreachable) {
+  ParseAssembly(
+      "define void @test() {\n"
+      "entry:\n"
+      "  br label %midblock\n"
+      "midblock:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  ret void\n"
+      "unreachable:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  br label %midblock\n"
+      "}");
+  ExpectPath(false);
+}
+
+TEST_F(IsPotentiallyReachableTest, BranchToReturn) {
+  ParseAssembly(
+      "define void @test(i1 %x) {\n"
+      "entry:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  br i1 %x, label %block1, label %block2\n"
+      "block1:\n"
+      "  ret void\n"
+      "block2:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  ret void\n"
+      "}");
+  ExpectPath(true);
+}
+
+TEST_F(IsPotentiallyReachableTest, SimpleLoop1) {
+  ParseAssembly(
+      "declare i1 @switch()\n"
+      "\n"
+      "define void @test() {\n"
+      "entry:\n"
+      "  br label %loop\n"
+      "loop:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  %x = call i1 @switch()\n"
+      "  br i1 %x, label %loop, label %exit\n"
+      "exit:\n"
+      "  ret void\n"
+      "}");
+  ExpectPath(true);
+}
+
+TEST_F(IsPotentiallyReachableTest, SimpleLoop2) {
+  ParseAssembly(
+      "declare i1 @switch()\n"
+      "\n"
+      "define void @test() {\n"
+      "entry:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  br label %loop\n"
+      "loop:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  %x = call i1 @switch()\n"
+      "  br i1 %x, label %loop, label %exit\n"
+      "exit:\n"
+      "  ret void\n"
+      "}");
+  ExpectPath(false);
+}
+
+TEST_F(IsPotentiallyReachableTest, SimpleLoop3) {
+  ParseAssembly(
+      "declare i1 @switch()\n"
+      "\n"
+      "define void @test() {\n"
+      "entry:\n"
+      "  br label %loop\n"
+      "loop:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  %x = call i1 @switch()\n"
+      "  br i1 %x, label %loop, label %exit\n"
+      "exit:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  ret void\n"
+      "}");
+  ExpectPath(false);
+}
+
+
+TEST_F(IsPotentiallyReachableTest, OneLoopAfterTheOther1) {
+  ParseAssembly(
+      "declare i1 @switch()\n"
+      "\n"
+      "define void @test() {\n"
+      "entry:\n"
+      "  br label %loop1\n"
+      "loop1:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  %x = call i1 @switch()\n"
+      "  br i1 %x, label %loop1, label %loop1exit\n"
+      "loop1exit:\n"
+      "  br label %loop2\n"
+      "loop2:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  %y = call i1 @switch()\n"
+      "  br i1 %x, label %loop2, label %loop2exit\n"
+      "loop2exit:"
+      "  ret void\n"
+      "}");
+  ExpectPath(true);
+}
+
+TEST_F(IsPotentiallyReachableTest, OneLoopAfterTheOther2) {
+  ParseAssembly(
+      "declare i1 @switch()\n"
+      "\n"
+      "define void @test() {\n"
+      "entry:\n"
+      "  br label %loop1\n"
+      "loop1:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  %x = call i1 @switch()\n"
+      "  br i1 %x, label %loop1, label %loop1exit\n"
+      "loop1exit:\n"
+      "  br label %loop2\n"
+      "loop2:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  %y = call i1 @switch()\n"
+      "  br i1 %x, label %loop2, label %loop2exit\n"
+      "loop2exit:"
+      "  ret void\n"
+      "}");
+  ExpectPath(false);
+}
+
+TEST_F(IsPotentiallyReachableTest, OneLoopAfterTheOtherInsideAThirdLoop) {
+  ParseAssembly(
+      "declare i1 @switch()\n"
+      "\n"
+      "define void @test() {\n"
+      "entry:\n"
+      "  br label %outerloop3\n"
+      "outerloop3:\n"
+      "  br label %innerloop1\n"
+      "innerloop1:\n"
+      "  %B = bitcast i8 undef to i8\n"
+      "  %x = call i1 @switch()\n"
+      "  br i1 %x, label %innerloop1, label %innerloop1exit\n"
+      "innerloop1exit:\n"
+      "  br label %innerloop2\n"
+      "innerloop2:\n"
+      "  %A = bitcast i8 undef to i8\n"
+      "  %y = call i1 @switch()\n"
+      "  br i1 %x, label %innerloop2, label %innerloop2exit\n"
+      "innerloop2exit:"
+      "  ;; In outer loop3 now.\n"
+      "  %z = call i1 @switch()\n"
+      "  br i1 %z, label %outerloop3, label %exit\n"
+      "exit:\n"
+      "  ret void\n"
+      "}");
+  ExpectPath(true);
+}
+
+static const char *BranchInsideLoopIR =
+    "declare i1 @switch()\n"
+    "\n"
+    "define void @test() {\n"
+    "entry:\n"
+    "  br label %loop\n"
+    "loop:\n"
+    "  %x = call i1 @switch()\n"
+    "  br i1 %x, label %nextloopblock, label %exit\n"
+    "nextloopblock:\n"
+    "  %y = call i1 @switch()\n"
+    "  br i1 %y, label %left, label %right\n"
+    "left:\n"
+    "  %A = bitcast i8 undef to i8\n"
+    "  br label %loop\n"
+    "right:\n"
+    "  %B = bitcast i8 undef to i8\n"
+    "  br label %loop\n"
+    "exit:\n"
+    "  ret void\n"
+    "}";
+
+TEST_F(IsPotentiallyReachableTest, BranchInsideLoop) {
+  ParseAssembly(BranchInsideLoopIR);
+  ExpectPath(true);
+}
+
+TEST_F(IsPotentiallyReachableTest, ModifyTest) {
+  ParseAssembly(BranchInsideLoopIR);
+
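+  // Redirect the first successor of the branch in %loop to %exit so that %A
+  // (in %left) can no longer reach %B (in %right), then restore it and check
+  // that the path comes back.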
+  succ_iterator S = succ_begin(&*++M->getFunction("test")->begin());
+  BasicBlock *OldBB = S[0];
+  S[0] = S[1];
+  ExpectPath(false);
+  S[0] = OldBB;
+  ExpectPath(true);
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp b/src/llvm-project/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp
new file mode 100644
index 0000000..60da2bb
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp
@@ -0,0 +1,1281 @@
+//===- CGSCCPassManagerTest.cpp -------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+
+class TestModuleAnalysis : public AnalysisInfoMixin<TestModuleAnalysis> {
+public:
+  struct Result {
+    Result(int Count) : FunctionCount(Count) {}
+    int FunctionCount;
+  };
+
+  TestModuleAnalysis(int &Runs) : Runs(Runs) {}
+
+  Result run(Module &M, ModuleAnalysisManager &AM) {
+    ++Runs;
+    return Result(M.size());
+  }
+
+private:
+  friend AnalysisInfoMixin<TestModuleAnalysis>;
+  static AnalysisKey Key;
+
+  int &Runs;
+};
+
+AnalysisKey TestModuleAnalysis::Key;
+
+class TestSCCAnalysis : public AnalysisInfoMixin<TestSCCAnalysis> {
+public:
+  struct Result {
+    Result(int Count) : FunctionCount(Count) {}
+    int FunctionCount;
+  };
+
+  TestSCCAnalysis(int &Runs) : Runs(Runs) {}
+
+  Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM, LazyCallGraph &) {
+    ++Runs;
+    return Result(C.size());
+  }
+
+private:
+  friend AnalysisInfoMixin<TestSCCAnalysis>;
+  static AnalysisKey Key;
+
+  int &Runs;
+};
+
+AnalysisKey TestSCCAnalysis::Key;
+
+class TestFunctionAnalysis : public AnalysisInfoMixin<TestFunctionAnalysis> {
+public:
+  struct Result {
+    Result(int Count) : InstructionCount(Count) {}
+    int InstructionCount;
+  };
+
+  TestFunctionAnalysis(int &Runs) : Runs(Runs) {}
+
+  Result run(Function &F, FunctionAnalysisManager &AM) {
+    ++Runs;
+    int Count = 0;
+    for (Instruction &I : instructions(F)) {
+      (void)I;
+      ++Count;
+    }
+    return Result(Count);
+  }
+
+private:
+  friend AnalysisInfoMixin<TestFunctionAnalysis>;
+  static AnalysisKey Key;
+
+  int &Runs;
+};
+
+AnalysisKey TestFunctionAnalysis::Key;
+
+class TestImmutableFunctionAnalysis
+    : public AnalysisInfoMixin<TestImmutableFunctionAnalysis> {
+public:
+  struct Result {
+    bool invalidate(Function &, const PreservedAnalyses &,
+                    FunctionAnalysisManager::Invalidator &) {
+      return false;
+    }
+  };
+
+  TestImmutableFunctionAnalysis(int &Runs) : Runs(Runs) {}
+
+  Result run(Function &F, FunctionAnalysisManager &AM) {
+    ++Runs;
+    return Result();
+  }
+
+private:
+  friend AnalysisInfoMixin<TestImmutableFunctionAnalysis>;
+  static AnalysisKey Key;
+
+  int &Runs;
+};
+
+AnalysisKey TestImmutableFunctionAnalysis::Key;
+
+struct LambdaModulePass : public PassInfoMixin<LambdaModulePass> {
+  template <typename T>
+  LambdaModulePass(T &&Arg) : Func(std::forward<T>(Arg)) {}
+
+  PreservedAnalyses run(Module &F, ModuleAnalysisManager &AM) {
+    return Func(F, AM);
+  }
+
+  std::function<PreservedAnalyses(Module &, ModuleAnalysisManager &)> Func;
+};
+
+struct LambdaSCCPass : public PassInfoMixin<LambdaSCCPass> {
+  template <typename T> LambdaSCCPass(T &&Arg) : Func(std::forward<T>(Arg)) {}
+
+  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+    return Func(C, AM, CG, UR);
+  }
+
+  std::function<PreservedAnalyses(LazyCallGraph::SCC &, CGSCCAnalysisManager &,
+                                  LazyCallGraph &, CGSCCUpdateResult &)>
+      Func;
+};
+
+struct LambdaFunctionPass : public PassInfoMixin<LambdaFunctionPass> {
+  template <typename T>
+  LambdaFunctionPass(T &&Arg) : Func(std::forward<T>(Arg)) {}
+
+  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
+    return Func(F, AM);
+  }
+
+  std::function<PreservedAnalyses(Function &, FunctionAnalysisManager &)> Func;
+};
+
+std::unique_ptr<Module> parseIR(const char *IR) {
+  // We just use a static context here. This is never called from multiple
+  // threads so it is harmless no matter how it is implemented. We just need
+  // the context to outlive the module which it does.
+  static LLVMContext C;
+  SMDiagnostic Err;
+  return parseAssemblyString(IR, Err, C);
+}
+
+class CGSCCPassManagerTest : public ::testing::Test {
+protected:
+  LLVMContext Context;
+  FunctionAnalysisManager FAM;
+  CGSCCAnalysisManager CGAM;
+  ModuleAnalysisManager MAM;
+
+  std::unique_ptr<Module> M;
+
+public:
+  CGSCCPassManagerTest()
+      : FAM(/*DebugLogging*/ true), CGAM(/*DebugLogging*/ true),
+        MAM(/*DebugLogging*/ true),
+        M(parseIR(
+            // Define a module with the following call graph, where calls go
+            // out the bottom of nodes and enter the top:
+            //
+            // f
+            // |\   _
+            // | \ / |
+            // g  h1 |
+            // |  |  |
+            // |  h2 |
+            // |  |  |
+            // |  h3 |
+            // | / \_/
+            // |/
+            // x
+            //
+            "define void @f() {\n"
+            "entry:\n"
+            "  call void @g()\n"
+            "  call void @h1()\n"
+            "  ret void\n"
+            "}\n"
+            "define void @g() {\n"
+            "entry:\n"
+            "  call void @g()\n"
+            "  call void @x()\n"
+            "  ret void\n"
+            "}\n"
+            "define void @h1() {\n"
+            "entry:\n"
+            "  call void @h2()\n"
+            "  ret void\n"
+            "}\n"
+            "define void @h2() {\n"
+            "entry:\n"
+            "  call void @h3()\n"
+            "  call void @x()\n"
+            "  ret void\n"
+            "}\n"
+            "define void @h3() {\n"
+            "entry:\n"
+            "  call void @h1()\n"
+            "  ret void\n"
+            "}\n"
+            "define void @x() {\n"
+            "entry:\n"
+            "  ret void\n"
+            "}\n")) {
+    MAM.registerPass([&] { return TargetLibraryAnalysis(); });
+    MAM.registerPass([&] { return LazyCallGraphAnalysis(); });
+    MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
+
+    // Register required pass instrumentation analysis.
+    MAM.registerPass([&] { return PassInstrumentationAnalysis(); });
+    CGAM.registerPass([&] { return PassInstrumentationAnalysis(); });
+    FAM.registerPass([&] { return PassInstrumentationAnalysis(); });
+
+    // Cross-register proxies.
+    MAM.registerPass([&] { return CGSCCAnalysisManagerModuleProxy(CGAM); });
+    CGAM.registerPass([&] { return FunctionAnalysisManagerCGSCCProxy(); });
+    CGAM.registerPass([&] { return ModuleAnalysisManagerCGSCCProxy(MAM); });
+    FAM.registerPass([&] { return CGSCCAnalysisManagerFunctionProxy(CGAM); });
+    FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
+  }
+};
+
+TEST_F(CGSCCPassManagerTest, Basic) {
+  int FunctionAnalysisRuns = 0;
+  FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+  int ImmutableFunctionAnalysisRuns = 0;
+  FAM.registerPass([&] {
+    return TestImmutableFunctionAnalysis(ImmutableFunctionAnalysisRuns);
+  });
+
+  int SCCAnalysisRuns = 0;
+  CGAM.registerPass([&] { return TestSCCAnalysis(SCCAnalysisRuns); });
+
+  int ModuleAnalysisRuns = 0;
+  MAM.registerPass([&] { return TestModuleAnalysis(ModuleAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  FunctionPassManager FPM1(/*DebugLogging*/ true);
+  int FunctionPassRunCount1 = 0;
+  FPM1.addPass(LambdaFunctionPass([&](Function &, FunctionAnalysisManager &) {
+    ++FunctionPassRunCount1;
+    return PreservedAnalyses::none();
+  }));
+  CGPM1.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
+
+  int SCCPassRunCount1 = 0;
+  int AnalyzedInstrCount1 = 0;
+  int AnalyzedSCCFunctionCount1 = 0;
+  int AnalyzedModuleFunctionCount1 = 0;
+  CGPM1.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+        ++SCCPassRunCount1;
+
+        const ModuleAnalysisManager &MAM =
+            AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG).getManager();
+        FunctionAnalysisManager &FAM =
+            AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
+        if (TestModuleAnalysis::Result *TMA =
+                MAM.getCachedResult<TestModuleAnalysis>(
+                    *C.begin()->getFunction().getParent()))
+          AnalyzedModuleFunctionCount1 += TMA->FunctionCount;
+
+        TestSCCAnalysis::Result &AR = AM.getResult<TestSCCAnalysis>(C, CG);
+        AnalyzedSCCFunctionCount1 += AR.FunctionCount;
+        for (LazyCallGraph::Node &N : C) {
+          TestFunctionAnalysis::Result &FAR =
+              FAM.getResult<TestFunctionAnalysis>(N.getFunction());
+          AnalyzedInstrCount1 += FAR.InstructionCount;
+
+          // Just ensure we get the immutable results.
+          (void)FAM.getResult<TestImmutableFunctionAnalysis>(N.getFunction());
+        }
+
+        return PreservedAnalyses::all();
+      }));
+
+  FunctionPassManager FPM2(/*DebugLogging*/ true);
+  int FunctionPassRunCount2 = 0;
+  FPM2.addPass(LambdaFunctionPass([&](Function &, FunctionAnalysisManager &) {
+    ++FunctionPassRunCount2;
+    return PreservedAnalyses::none();
+  }));
+  CGPM1.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM2)));
+
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  FunctionPassManager FPM3(/*DebugLogging*/ true);
+  int FunctionPassRunCount3 = 0;
+  FPM3.addPass(LambdaFunctionPass([&](Function &, FunctionAnalysisManager &) {
+    ++FunctionPassRunCount3;
+    return PreservedAnalyses::none();
+  }));
+  MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM3)));
+
+  MPM.run(*M, MAM);
+
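+  // The module has four SCCs ({f}, {g}, {h1, h2, h3} and {x}), six functions,
+  // and fourteen instructions in total; the counts below reflect that.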
+  EXPECT_EQ(4, SCCPassRunCount1);
+  EXPECT_EQ(6, FunctionPassRunCount1);
+  EXPECT_EQ(6, FunctionPassRunCount2);
+  EXPECT_EQ(6, FunctionPassRunCount3);
+
+  EXPECT_EQ(1, ModuleAnalysisRuns);
+  EXPECT_EQ(4, SCCAnalysisRuns);
+  EXPECT_EQ(6, FunctionAnalysisRuns);
+  EXPECT_EQ(6, ImmutableFunctionAnalysisRuns);
+
+  EXPECT_EQ(14, AnalyzedInstrCount1);
+  EXPECT_EQ(6, AnalyzedSCCFunctionCount1);
+  EXPECT_EQ(4 * 6, AnalyzedModuleFunctionCount1);
+}
+
+// Test that an SCC pass which fails to preserve a module analysis does in fact
+// invalidate that module analysis.
+TEST_F(CGSCCPassManagerTest, TestSCCPassInvalidatesModuleAnalysis) {
+  int ModuleAnalysisRuns = 0;
+  MAM.registerPass([&] { return TestModuleAnalysis(ModuleAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+
+  // The first CGSCC run we preserve everything and make sure that works and
+  // the module analysis is available in the second CGSCC run from the one
+  // required module pass above.
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  int CountFoundModuleAnalysis1 = 0;
+  CGPM1.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+        const auto &MAM =
+            AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG).getManager();
+        auto *TMA = MAM.getCachedResult<TestModuleAnalysis>(
+            *C.begin()->getFunction().getParent());
+
+        if (TMA)
+          ++CountFoundModuleAnalysis1;
+
+        return PreservedAnalyses::all();
+      }));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  // The second CGSCC run checks that the module analysis got preserved the
+  // previous time and in one SCC fails to preserve it.
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  int CountFoundModuleAnalysis2 = 0;
+  CGPM2.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+        const auto &MAM =
+            AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG).getManager();
+        auto *TMA = MAM.getCachedResult<TestModuleAnalysis>(
+            *C.begin()->getFunction().getParent());
+
+        if (TMA)
+          ++CountFoundModuleAnalysis2;
+
+        // Only fail to preserve analyses on one SCC and make sure that gets
+        // propagated.
+        return C.getName() == "(g)" ? PreservedAnalyses::none()
+                                  : PreservedAnalyses::all();
+      }));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+
+  // The third CGSCC run should fail to find a cached module analysis as it
+  // should have been invalidated by the above CGSCC run.
+  CGSCCPassManager CGPM3(/*DebugLogging*/ true);
+  int CountFoundModuleAnalysis3 = 0;
+  CGPM3.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+        const auto &MAM =
+            AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG).getManager();
+        auto *TMA = MAM.getCachedResult<TestModuleAnalysis>(
+            *C.begin()->getFunction().getParent());
+
+        if (TMA)
+          ++CountFoundModuleAnalysis3;
+
+        return PreservedAnalyses::none();
+      }));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM3)));
+
+  MPM.run(*M, MAM);
+
+  EXPECT_EQ(1, ModuleAnalysisRuns);
+  EXPECT_EQ(4, CountFoundModuleAnalysis1);
+  EXPECT_EQ(4, CountFoundModuleAnalysis2);
+  EXPECT_EQ(0, CountFoundModuleAnalysis3);
+}
+
+// Similar to the above, but test that this works for function passes embedded
+// *within* a CGSCC layer.
+TEST_F(CGSCCPassManagerTest, TestFunctionPassInsideCGSCCInvalidatesModuleAnalysis) {
+  int ModuleAnalysisRuns = 0;
+  MAM.registerPass([&] { return TestModuleAnalysis(ModuleAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+
+  // The first run we preserve everything and make sure that works and the
+  // module analysis is available in the second run from the one required
+  // module pass above.
+  FunctionPassManager FPM1(/*DebugLogging*/ true);
+  // Start true and mark false if we ever failed to find a module analysis
+  // because we expect this to succeed for each SCC.
+  bool FoundModuleAnalysis1 = true;
+  FPM1.addPass(
+      LambdaFunctionPass([&](Function &F, FunctionAnalysisManager &AM) {
+        const auto &MAM =
+            AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
+        auto *TMA = MAM.getCachedResult<TestModuleAnalysis>(*F.getParent());
+
+        if (!TMA)
+          FoundModuleAnalysis1 = false;
+
+        return PreservedAnalyses::all();
+      }));
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  CGPM1.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  // The second run checks that the module analysis got preserved the previous
+  // time and in one function fails to preserve it.
+  FunctionPassManager FPM2(/*DebugLogging*/ true);
+  // Again, start true and mark false if we ever failed to find a module analysis
+  // because we expect this to succeed for each SCC.
+  bool FoundModuleAnalysis2 = true;
+  FPM2.addPass(
+      LambdaFunctionPass([&](Function &F, FunctionAnalysisManager &AM) {
+        const auto &MAM =
+            AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
+        auto *TMA = MAM.getCachedResult<TestModuleAnalysis>(*F.getParent());
+
+        if (!TMA)
+          FoundModuleAnalysis2 = false;
+
+        // Only fail to preserve analyses on one SCC and make sure that gets
+        // propagated.
+        return F.getName() == "h2" ? PreservedAnalyses::none()
+                                   : PreservedAnalyses::all();
+      }));
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM2)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+
+  // The third run should fail to find a cached module analysis as it should
+  // have been invalidated by the above run.
+  FunctionPassManager FPM3(/*DebugLogging*/ true);
+  // Start false and mark true if we ever *succeeded* to find a module
+  // analysis, as we expect this to fail for every function.
+  bool FoundModuleAnalysis3 = false;
+  FPM3.addPass(
+      LambdaFunctionPass([&](Function &F, FunctionAnalysisManager &AM) {
+        const auto &MAM =
+            AM.getResult<ModuleAnalysisManagerFunctionProxy>(F).getManager();
+        auto *TMA = MAM.getCachedResult<TestModuleAnalysis>(*F.getParent());
+
+        if (TMA)
+          FoundModuleAnalysis3 = true;
+
+        return PreservedAnalyses::none();
+      }));
+  CGSCCPassManager CGPM3(/*DebugLogging*/ true);
+  CGPM3.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM3)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM3)));
+
+  MPM.run(*M, MAM);
+
+  EXPECT_EQ(1, ModuleAnalysisRuns);
+  EXPECT_TRUE(FoundModuleAnalysis1);
+  EXPECT_TRUE(FoundModuleAnalysis2);
+  EXPECT_FALSE(FoundModuleAnalysis3);
+}
+
+// Test that a Module pass which fails to preserve an SCC analysis in fact
+// invalidates that analysis.
+TEST_F(CGSCCPassManagerTest, TestModulePassInvalidatesSCCAnalysis) {
+  int SCCAnalysisRuns = 0;
+  CGAM.registerPass([&] { return TestSCCAnalysis(SCCAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+
+  // First force the analysis to be run.
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  CGPM1.addPass(RequireAnalysisPass<TestSCCAnalysis, LazyCallGraph::SCC,
+                                    CGSCCAnalysisManager, LazyCallGraph &,
+                                    CGSCCUpdateResult &>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  // Now run a module pass that preserves the LazyCallGraph and the proxy but
+  // not the SCC analysis.
+  MPM.addPass(LambdaModulePass([&](Module &M, ModuleAnalysisManager &) {
+    PreservedAnalyses PA;
+    PA.preserve<LazyCallGraphAnalysis>();
+    PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+    PA.preserve<FunctionAnalysisManagerModuleProxy>();
+    return PA;
+  }));
+
+  // And now a second CGSCC run which requires the SCC analysis again. This
+  // will trigger re-running it.
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(RequireAnalysisPass<TestSCCAnalysis, LazyCallGraph::SCC,
+                                    CGSCCAnalysisManager, LazyCallGraph &,
+                                    CGSCCUpdateResult &>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+
+  MPM.run(*M, MAM);
+  // Two runs and four SCCs.
+  EXPECT_EQ(2 * 4, SCCAnalysisRuns);
+}
+
+// Check that marking the SCC analysis preserved is sufficient to avoid
+// invalidation. This should only run the analysis once for each SCC.
+TEST_F(CGSCCPassManagerTest, TestModulePassCanPreserveSCCAnalysis) {
+  int SCCAnalysisRuns = 0;
+  CGAM.registerPass([&] { return TestSCCAnalysis(SCCAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+
+  // First force the analysis to be run.
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  CGPM1.addPass(RequireAnalysisPass<TestSCCAnalysis, LazyCallGraph::SCC,
+                                    CGSCCAnalysisManager, LazyCallGraph &,
+                                    CGSCCUpdateResult &>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  // Now run a module pass that preserves each of the necessary components
+  // (but not everything).
+  MPM.addPass(LambdaModulePass([&](Module &M, ModuleAnalysisManager &) {
+    PreservedAnalyses PA;
+    PA.preserve<LazyCallGraphAnalysis>();
+    PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+    PA.preserve<FunctionAnalysisManagerModuleProxy>();
+    PA.preserve<TestSCCAnalysis>();
+    return PA;
+  }));
+
+  // And now a second CGSCC run which requires the SCC analysis again but finds
+  // it in the cache.
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(RequireAnalysisPass<TestSCCAnalysis, LazyCallGraph::SCC,
+                                    CGSCCAnalysisManager, LazyCallGraph &,
+                                    CGSCCUpdateResult &>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+
+  MPM.run(*M, MAM);
+  // Four SCCs
+  EXPECT_EQ(4, SCCAnalysisRuns);
+}
+
+// Check that even when the analysis is preserved, if the SCC information isn't
+// we still nuke things because the SCC keys could change.
+TEST_F(CGSCCPassManagerTest, TestModulePassInvalidatesSCCAnalysisOnCGChange) {
+  int SCCAnalysisRuns = 0;
+  CGAM.registerPass([&] { return TestSCCAnalysis(SCCAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+
+  // First force the analysis to be run.
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  CGPM1.addPass(RequireAnalysisPass<TestSCCAnalysis, LazyCallGraph::SCC,
+                                    CGSCCAnalysisManager, LazyCallGraph &,
+                                    CGSCCUpdateResult &>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  // Now run a module pass that preserves the analysis but not the call
+  // graph or proxy.
+  MPM.addPass(LambdaModulePass([&](Module &M, ModuleAnalysisManager &) {
+    PreservedAnalyses PA;
+    PA.preserve<TestSCCAnalysis>();
+    return PA;
+  }));
+
+  // And now a second CGSCC run which requires the SCC analysis again.
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(RequireAnalysisPass<TestSCCAnalysis, LazyCallGraph::SCC,
+                                    CGSCCAnalysisManager, LazyCallGraph &,
+                                    CGSCCUpdateResult &>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+
+  MPM.run(*M, MAM);
+  // Two runs and four SCCs.
+  EXPECT_EQ(2 * 4, SCCAnalysisRuns);
+}
+
+// Test that an SCC pass which fails to preserve a Function analysis in fact
+// invalidates that analysis.
+TEST_F(CGSCCPassManagerTest, TestSCCPassInvalidatesFunctionAnalysis) {
+  int FunctionAnalysisRuns = 0;
+  FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+
+  // Create a very simple module with a single function and SCC to make testing
+  // these issues much easier.
+  std::unique_ptr<Module> M = parseIR("declare void @g()\n"
+                                      "declare void @h()\n"
+                                      "define void @f() {\n"
+                                      "entry:\n"
+                                      "  call void @g()\n"
+                                      "  call void @h()\n"
+                                      "  ret void\n"
+                                      "}\n");
+
+  CGSCCPassManager CGPM(/*DebugLogging*/ true);
+
+  // First force the analysis to be run.
+  FunctionPassManager FPM1(/*DebugLogging*/ true);
+  FPM1.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGPM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
+
+  // Now run an SCC pass that preserves the LazyCallGraph but not the proxy
+  // or the Function analysis.
+  CGPM.addPass(LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &,
+                                 LazyCallGraph &, CGSCCUpdateResult &) {
+    PreservedAnalyses PA;
+    PA.preserve<LazyCallGraphAnalysis>();
+    return PA;
+  }));
+
+  // And now a second function pass run which requires the Function analysis
+  // again. This will trigger re-running it.
+  FunctionPassManager FPM2(/*DebugLogging*/ true);
+  FPM2.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGPM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM2)));
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+  MPM.run(*M, MAM);
+  EXPECT_EQ(2, FunctionAnalysisRuns);
+}
+
+// Check that marking the Function analysis preserved is sufficient. This
+// should only run the analysis once for the function.
+TEST_F(CGSCCPassManagerTest, TestSCCPassCanPreserveFunctionAnalysis) {
+  int FunctionAnalysisRuns = 0;
+  FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+
+  // Create a very simple module with a single function and SCC to make testing
+  // these issues much easier.
+  std::unique_ptr<Module> M = parseIR("declare void @g()\n"
+                                      "declare void @h()\n"
+                                      "define void @f() {\n"
+                                      "entry:\n"
+                                      "  call void @g()\n"
+                                      "  call void @h()\n"
+                                      "  ret void\n"
+                                      "}\n");
+
+  CGSCCPassManager CGPM(/*DebugLogging*/ true);
+
+  // First force the analysis to be run.
+  FunctionPassManager FPM1(/*DebugLogging*/ true);
+  FPM1.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGPM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
+
+  // Now run an SCC pass that preserves each of the necessary components (but
+  // not everything).
+  CGPM.addPass(LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &,
+                                 LazyCallGraph &, CGSCCUpdateResult &) {
+    PreservedAnalyses PA;
+    PA.preserve<LazyCallGraphAnalysis>();
+    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+    PA.preserve<TestFunctionAnalysis>();
+    return PA;
+  }));
+
+  // And now a second function pass run which requires the Function analysis
+  // again but finds it in the cache.
+  FunctionPassManager FPM2(/*DebugLogging*/ true);
+  FPM2.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGPM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM2)));
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+  MPM.run(*M, MAM);
+  EXPECT_EQ(1, FunctionAnalysisRuns);
+}
+
+// Note that there is no test for invalidating the call graph or other
+// structure with an SCC pass because there is no mechanism to do that from
+// within such a pass. Instead, such a pass has to directly update the call
+// graph structure.
+
+// Test that a module pass invalidates function analyses nested inside the
+// CGSCC proxies and pass manager.
+TEST_F(CGSCCPassManagerTest,
+       TestModulePassInvalidatesFunctionAnalysisNestedInCGSCC) {
+  MAM.registerPass([&] { return LazyCallGraphAnalysis(); });
+
+  int FunctionAnalysisRuns = 0;
+  FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+
+  // First force the analysis to be run.
+  FunctionPassManager FPM1(/*DebugLogging*/ true);
+  FPM1.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  CGPM1.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  // Now run a module pass that preserves the LazyCallGraph and proxies but not
+  // the Function analysis.
+  MPM.addPass(LambdaModulePass([&](Module &M, ModuleAnalysisManager &) {
+    PreservedAnalyses PA;
+    PA.preserve<LazyCallGraphAnalysis>();
+    PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+    PA.preserve<FunctionAnalysisManagerModuleProxy>();
+    return PA;
+  }));
+
+  // And now a second CGSCC run which requires the Function analysis again.
+  // This will trigger re-running it.
+  FunctionPassManager FPM2(/*DebugLogging*/ true);
+  FPM2.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM2)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+
+  MPM.run(*M, MAM);
+  // Two runs and 6 functions.
+  EXPECT_EQ(2 * 6, FunctionAnalysisRuns);
+}
+
+// Check that by marking the function analysis and proxies as preserved, this
+// propagates all the way through.
+TEST_F(CGSCCPassManagerTest,
+       TestModulePassCanPreserveFunctionAnalysisNestedInCGSCC) {
+  MAM.registerPass([&] { return LazyCallGraphAnalysis(); });
+
+  int FunctionAnalysisRuns = 0;
+  FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+
+  // First force the analysis to be run.
+  FunctionPassManager FPM1(/*DebugLogging*/ true);
+  FPM1.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  CGPM1.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  // Now run a module pass that preserves the LazyCallGraph, the proxy, and
+  // the Function analysis.
+  MPM.addPass(LambdaModulePass([&](Module &M, ModuleAnalysisManager &) {
+    PreservedAnalyses PA;
+    PA.preserve<LazyCallGraphAnalysis>();
+    PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+    PA.preserve<FunctionAnalysisManagerModuleProxy>();
+    PA.preserve<TestFunctionAnalysis>();
+    return PA;
+  }));
+
+  // And now a second CGSCC run which requires the Function analysis again but
+  // finds it in the cache.
+  FunctionPassManager FPM2(/*DebugLogging*/ true);
+  FPM2.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM2)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+
+  MPM.run(*M, MAM);
+  // One run and 6 functions.
+  EXPECT_EQ(6, FunctionAnalysisRuns);
+}
+
+// Check that if the lazy call graph itself isn't preserved we still manage to
+// invalidate everything.
+TEST_F(CGSCCPassManagerTest,
+       TestModulePassInvalidatesFunctionAnalysisNestedInCGSCCOnCGChange) {
+  MAM.registerPass([&] { return LazyCallGraphAnalysis(); });
+
+  int FunctionAnalysisRuns = 0;
+  FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+
+  // First force the analysis to be run.
+  FunctionPassManager FPM1(/*DebugLogging*/ true);
+  FPM1.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGSCCPassManager CGPM1(/*DebugLogging*/ true);
+  CGPM1.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM1)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM1)));
+
+  // Now run a module pass that preserves nothing: not the LazyCallGraph, the
+  // proxies, or the Function analysis.
+  MPM.addPass(LambdaModulePass([&](Module &M, ModuleAnalysisManager &) {
+    PreservedAnalyses PA;
+    return PA;
+  }));
+
+  // And now a second CGSCC run which requires the Function analysis again.
+  // This will trigger re-running it.
+  FunctionPassManager FPM2(/*DebugLogging*/ true);
+  FPM2.addPass(RequireAnalysisPass<TestFunctionAnalysis, Function>());
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM2)));
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+
+  MPM.run(*M, MAM);
+  // Two runs and 6 functions.
+  EXPECT_EQ(2 * 6, FunctionAnalysisRuns);
+}
+
+/// A test CGSCC-level analysis pass which caches in its result another
+/// analysis pass and uses it to serve queries. This requires the result to
+/// invalidate itself when its dependency is invalidated.
+///
+/// FIXME: Currently this doesn't also depend on a function analysis, and if it
+/// did we would fail to invalidate it correctly.
+struct TestIndirectSCCAnalysis
+    : public AnalysisInfoMixin<TestIndirectSCCAnalysis> {
+  struct Result {
+    Result(TestSCCAnalysis::Result &SCCDep, TestModuleAnalysis::Result &MDep)
+        : SCCDep(SCCDep), MDep(MDep) {}
+    TestSCCAnalysis::Result &SCCDep;
+    TestModuleAnalysis::Result &MDep;
+
+    bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
+                    CGSCCAnalysisManager::Invalidator &Inv) {
+      auto PAC = PA.getChecker<TestIndirectSCCAnalysis>();
+      return !(PAC.preserved() ||
+               PAC.preservedSet<AllAnalysesOn<LazyCallGraph::SCC>>()) ||
+             Inv.invalidate<TestSCCAnalysis>(C, PA);
+    }
+  };
+
+  TestIndirectSCCAnalysis(int &Runs) : Runs(Runs) {}
+
+  /// Run the analysis pass over the SCC and return a result.
+  Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+             LazyCallGraph &CG) {
+    ++Runs;
+    auto &SCCDep = AM.getResult<TestSCCAnalysis>(C, CG);
+
+    auto &ModuleProxy = AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C, CG);
+    const ModuleAnalysisManager &MAM = ModuleProxy.getManager();
+    // For the test, we insist that the module analysis starts off in the
+    // cache.
+    auto &MDep = *MAM.getCachedResult<TestModuleAnalysis>(
+        *C.begin()->getFunction().getParent());
+    // Register the dependency as module analysis dependencies have to be
+    // pre-registered on the proxy.
+    ModuleProxy.registerOuterAnalysisInvalidation<TestModuleAnalysis,
+                                                  TestIndirectSCCAnalysis>();
+
+    return Result(SCCDep, MDep);
+  }
+
+private:
+  friend AnalysisInfoMixin<TestIndirectSCCAnalysis>;
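+  // AnalysisInfoMixin identifies each analysis type by the address of this
+  // static key, so every analysis needs its own Key object.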
+  static AnalysisKey Key;
+
+  int &Runs;
+};
+
+AnalysisKey TestIndirectSCCAnalysis::Key;
+
+/// A test analysis pass which caches in its result the result from the above
+/// indirect analysis pass.
+///
+/// This allows us to ensure that whenever an analysis pass is invalidated due
+/// to dependencies (especially dependencies across IR units that trigger
+/// asynchronous invalidation) we correctly detect that this may in turn cause
+/// other analysis to be invalidated.
+struct TestDoublyIndirectSCCAnalysis
+    : public AnalysisInfoMixin<TestDoublyIndirectSCCAnalysis> {
+  struct Result {
+    Result(TestIndirectSCCAnalysis::Result &IDep) : IDep(IDep) {}
+    TestIndirectSCCAnalysis::Result &IDep;
+
+    bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
+                    CGSCCAnalysisManager::Invalidator &Inv) {
+      auto PAC = PA.getChecker<TestDoublyIndirectSCCAnalysis>();
+      return !(PAC.preserved() ||
+               PAC.preservedSet<AllAnalysesOn<LazyCallGraph::SCC>>()) ||
+             Inv.invalidate<TestIndirectSCCAnalysis>(C, PA);
+    }
+  };
+
+  TestDoublyIndirectSCCAnalysis(int &Runs) : Runs(Runs) {}
+
+  /// Run the analysis pass over the SCC and return a result.
+  Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+             LazyCallGraph &CG) {
+    ++Runs;
+    auto &IDep = AM.getResult<TestIndirectSCCAnalysis>(C, CG);
+    return Result(IDep);
+  }
+
+private:
+  friend AnalysisInfoMixin<TestDoublyIndirectSCCAnalysis>;
+  static AnalysisKey Key;
+
+  int &Runs;
+};
+
+AnalysisKey TestDoublyIndirectSCCAnalysis::Key;
+
+/// A test analysis pass which caches results from three different IR unit
+/// layers and requires intermediate layers to correctly propagate the entire
+/// distance.
+struct TestIndirectFunctionAnalysis
+    : public AnalysisInfoMixin<TestIndirectFunctionAnalysis> {
+  struct Result {
+    Result(TestFunctionAnalysis::Result &FDep, TestModuleAnalysis::Result &MDep,
+           TestSCCAnalysis::Result &SCCDep)
+        : FDep(FDep), MDep(MDep), SCCDep(SCCDep) {}
+    TestFunctionAnalysis::Result &FDep;
+    TestModuleAnalysis::Result &MDep;
+    TestSCCAnalysis::Result &SCCDep;
+
+    bool invalidate(Function &F, const PreservedAnalyses &PA,
+                    FunctionAnalysisManager::Invalidator &Inv) {
+      auto PAC = PA.getChecker<TestIndirectFunctionAnalysis>();
+      return !(PAC.preserved() ||
+               PAC.preservedSet<AllAnalysesOn<Function>>()) ||
+             Inv.invalidate<TestFunctionAnalysis>(F, PA);
+    }
+  };
+
+  TestIndirectFunctionAnalysis(int &Runs) : Runs(Runs) {}
+
+  /// Run the analysis pass over the function and return a result.
+  Result run(Function &F, FunctionAnalysisManager &AM) {
+    ++Runs;
+    auto &FDep = AM.getResult<TestFunctionAnalysis>(F);
+
+    auto &ModuleProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
+    const ModuleAnalysisManager &MAM = ModuleProxy.getManager();
+    // For the test, we insist that the module analysis starts off in the
+    // cache.
+    auto &MDep = *MAM.getCachedResult<TestModuleAnalysis>(*F.getParent());
+    // Register the dependency as module analysis dependencies have to be
+    // pre-registered on the proxy.
+    ModuleProxy.registerOuterAnalysisInvalidation<
+        TestModuleAnalysis, TestIndirectFunctionAnalysis>();
+
+    // For the test we assume this is run inside a CGSCC pass manager.
+    const LazyCallGraph &CG =
+        *MAM.getCachedResult<LazyCallGraphAnalysis>(*F.getParent());
+    auto &CGSCCProxy = AM.getResult<CGSCCAnalysisManagerFunctionProxy>(F);
+    const CGSCCAnalysisManager &CGAM = CGSCCProxy.getManager();
+    // For the test, we insist that the CGSCC analysis starts off in the cache.
+    auto &SCCDep =
+        *CGAM.getCachedResult<TestSCCAnalysis>(*CG.lookupSCC(*CG.lookup(F)));
+    // Register the dependency as CGSCC analysis dependencies have to be
+    // pre-registered on the proxy.
+    CGSCCProxy.registerOuterAnalysisInvalidation<
+        TestSCCAnalysis, TestIndirectFunctionAnalysis>();
+
+    return Result(FDep, MDep, SCCDep);
+  }
+
+private:
+  friend AnalysisInfoMixin<TestIndirectFunctionAnalysis>;
+  static AnalysisKey Key;
+
+  int &Runs;
+};
+
+AnalysisKey TestIndirectFunctionAnalysis::Key;
+
+TEST_F(CGSCCPassManagerTest, TestIndirectAnalysisInvalidation) {
+  int ModuleAnalysisRuns = 0;
+  MAM.registerPass([&] { return TestModuleAnalysis(ModuleAnalysisRuns); });
+
+  int SCCAnalysisRuns = 0, IndirectSCCAnalysisRuns = 0,
+      DoublyIndirectSCCAnalysisRuns = 0;
+  CGAM.registerPass([&] { return TestSCCAnalysis(SCCAnalysisRuns); });
+  CGAM.registerPass(
+      [&] { return TestIndirectSCCAnalysis(IndirectSCCAnalysisRuns); });
+  CGAM.registerPass([&] {
+    return TestDoublyIndirectSCCAnalysis(DoublyIndirectSCCAnalysisRuns);
+  });
+
+  int FunctionAnalysisRuns = 0, IndirectFunctionAnalysisRuns = 0;
+  FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+  FAM.registerPass([&] {
+    return TestIndirectFunctionAnalysis(IndirectFunctionAnalysisRuns);
+  });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+
+  int FunctionCount = 0;
+  CGSCCPassManager CGPM(/*DebugLogging*/ true);
+  // First just use the analysis to get the function count and preserve
+  // everything.
+  CGPM.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &) {
+        auto &DoublyIndirectResult =
+            AM.getResult<TestDoublyIndirectSCCAnalysis>(C, CG);
+        auto &IndirectResult = DoublyIndirectResult.IDep;
+        FunctionCount += IndirectResult.SCCDep.FunctionCount;
+        return PreservedAnalyses::all();
+      }));
+  CGPM.addPass(createCGSCCToFunctionPassAdaptor(
+      RequireAnalysisPass<TestIndirectFunctionAnalysis, Function>()));
+
+  // Next, invalidate
+  //   - both analyses for the (f) and (x) SCCs,
+  //   - just the underlying (indirect) analysis for (g) SCC, and
+  //   - just the direct analysis for (h1,h2,h3) SCC.
+  CGPM.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &) {
+        auto &DoublyIndirectResult =
+            AM.getResult<TestDoublyIndirectSCCAnalysis>(C, CG);
+        auto &IndirectResult = DoublyIndirectResult.IDep;
+        FunctionCount += IndirectResult.SCCDep.FunctionCount;
+        auto PA = PreservedAnalyses::none();
+        PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+        PA.preserveSet<AllAnalysesOn<Function>>();
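+        // Keeping the proxy and all function analyses alive means only the
+        // SCC-level analyses are invalidated; which of those survive is
+        // decided per SCC below.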
+        if (C.getName() == "(g)")
+          PA.preserve<TestSCCAnalysis>();
+        else if (C.getName() == "(h3, h1, h2)")
+          PA.preserve<TestIndirectSCCAnalysis>();
+        return PA;
+      }));
+  // Finally, use the analysis again on each SCC (and function), forcing
+  // re-computation for all of them.
+  CGPM.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &) {
+        auto &DoublyIndirectResult =
+            AM.getResult<TestDoublyIndirectSCCAnalysis>(C, CG);
+        auto &IndirectResult = DoublyIndirectResult.IDep;
+        FunctionCount += IndirectResult.SCCDep.FunctionCount;
+        return PreservedAnalyses::all();
+      }));
+  CGPM.addPass(createCGSCCToFunctionPassAdaptor(
+      RequireAnalysisPass<TestIndirectFunctionAnalysis, Function>()));
+
+  // Create a second CGSCC pass manager. This will cause the module-level
+  // invalidation to occur, which will force yet another invalidation of the
+  // indirect SCC-level analysis as the module analysis it depends on gets
+  // invalidated.
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &) {
+        auto &DoublyIndirectResult =
+            AM.getResult<TestDoublyIndirectSCCAnalysis>(C, CG);
+        auto &IndirectResult = DoublyIndirectResult.IDep;
+        FunctionCount += IndirectResult.SCCDep.FunctionCount;
+        return PreservedAnalyses::all();
+      }));
+  CGPM2.addPass(createCGSCCToFunctionPassAdaptor(
+      RequireAnalysisPass<TestIndirectFunctionAnalysis, Function>()));
+
+  // Add a requires pass to populate the module analysis and then our CGSCC
+  // pass pipeline.
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+  // Now require the module analysis again (it will have been invalidated once)
+  // and then use it again from our second CGSCC pipeline.
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+  MPM.run(*M, MAM);
+
+  // There are generally two possible runs for each of the four SCCs. But for
+  // one SCC we only invalidate the indirect analysis, so the base one only
+  // gets run (2 * 4) - 1 = 7 times.
+  EXPECT_EQ(7, SCCAnalysisRuns);
+  // The module analysis pass should be run twice here.
+  EXPECT_EQ(2, ModuleAnalysisRuns);
+  // The indirect analysis is invalidated (either directly or indirectly) three
+  // times for each of four SCCs.
+  EXPECT_EQ(3 * 4, IndirectSCCAnalysisRuns);
+  EXPECT_EQ(3 * 4, DoublyIndirectSCCAnalysisRuns);
+
+  // We run the indirect function analysis once per function the first time.
+  // Then we re-run it for every SCC but "(g)". Then we re-run it for every
+  // function again.
+  EXPECT_EQ(6 + 5 + 6, IndirectFunctionAnalysisRuns);
+
+  // Four passes count each of six functions once (via SCCs).
+  EXPECT_EQ(4 * 6, FunctionCount);
+}
+
+TEST_F(CGSCCPassManagerTest, TestAnalysisInvalidationCGSCCUpdate) {
+  int ModuleAnalysisRuns = 0;
+  MAM.registerPass([&] { return TestModuleAnalysis(ModuleAnalysisRuns); });
+
+  int SCCAnalysisRuns = 0, IndirectSCCAnalysisRuns = 0,
+      DoublyIndirectSCCAnalysisRuns = 0;
+  CGAM.registerPass([&] { return TestSCCAnalysis(SCCAnalysisRuns); });
+  CGAM.registerPass(
+      [&] { return TestIndirectSCCAnalysis(IndirectSCCAnalysisRuns); });
+  CGAM.registerPass([&] {
+    return TestDoublyIndirectSCCAnalysis(DoublyIndirectSCCAnalysisRuns);
+  });
+
+  int FunctionAnalysisRuns = 0, IndirectFunctionAnalysisRuns = 0;
+  FAM.registerPass([&] { return TestFunctionAnalysis(FunctionAnalysisRuns); });
+  FAM.registerPass([&] {
+    return TestIndirectFunctionAnalysis(IndirectFunctionAnalysisRuns);
+  });
+
+  ModulePassManager MPM(/*DebugLogging*/ true);
+
+  CGSCCPassManager CGPM(/*DebugLogging*/ true);
+  // First just require the analyses to force them to be computed for every
+  // SCC and function.
+  using RequireTestIndirectFunctionAnalysisPass =
+      RequireAnalysisPass<TestIndirectFunctionAnalysis, Function>;
+  using RequireTestDoublyIndirectSCCAnalysisPass =
+      RequireAnalysisPass<TestDoublyIndirectSCCAnalysis, LazyCallGraph::SCC,
+                          CGSCCAnalysisManager, LazyCallGraph &,
+                          CGSCCUpdateResult &>;
+  CGPM.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+  CGPM.addPass(createCGSCCToFunctionPassAdaptor(
+      RequireTestIndirectFunctionAnalysisPass()));
+
+  // Next, we inject an SCC pass that invalidates everything for the `(h3, h1,
+  // h2)` SCC but also deletes the call edge from `h2` to `h3` and updates the
+  // CG. This should successfully invalidate (and force to be re-run) all the
+  // analyses for that SCC and for the functions.
+  CGPM.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+        (void)AM.getResult<TestDoublyIndirectSCCAnalysis>(C, CG);
+        if (C.getName() != "(h3, h1, h2)")
+          return PreservedAnalyses::all();
+
+        // Build the preserved set.
+        auto PA = PreservedAnalyses::none();
+        PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+        PA.preserve<TestIndirectSCCAnalysis>();
+        PA.preserve<TestDoublyIndirectSCCAnalysis>();
+
+        // Delete the call from `h2` to `h3`.
+        auto &H2N = *llvm::find_if(
+            C, [](LazyCallGraph::Node &N) { return N.getName() == "h2"; });
+        auto &H2F = H2N.getFunction();
+        auto &H3F = *cast<CallInst>(H2F.begin()->begin())->getCalledFunction();
+        assert(H3F.getName() == "h3" && "Wrong called function!");
+        H2F.begin()->begin()->eraseFromParent();
+        // Insert a bitcast of `h3` so that we retain a ref edge to it.
+        (void)CastInst::CreatePointerCast(&H3F,
+                                          Type::getInt8PtrTy(H2F.getContext()),
+                                          "dummy", &*H2F.begin()->begin());
+
+        // Now update the call graph.
+        auto &NewC =
+            updateCGAndAnalysisManagerForFunctionPass(CG, C, H2N, AM, UR);
+        assert(&NewC != &C && "Should get a new SCC due to update!");
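+        // NewC is only used by the assert above; cast it to void so builds
+        // without assertions don't warn about an unused variable.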
+        (void)&NewC;
+
+        return PA;
+      }));
+  // Now use the analysis again on each SCC and function, forcing
+  // re-computation for all of them.
+  CGPM.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+  CGPM.addPass(createCGSCCToFunctionPassAdaptor(
+      RequireTestIndirectFunctionAnalysisPass()));
+
+  // Create another CGSCC pipeline that requires all the analyses again.
+  CGSCCPassManager CGPM2(/*DebugLogging*/ true);
+  CGPM2.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+  CGPM2.addPass(createCGSCCToFunctionPassAdaptor(
+      RequireTestIndirectFunctionAnalysisPass()));
+
+  // Next we inject an SCC pass that finds the `(h2)` SCC, adds the call from
+  // `h2` to `h3` back, and then invalidates everything for what will then be
+  // the `(h3, h1, h2)` SCC again.
+  CGSCCPassManager CGPM3(/*DebugLogging*/ true);
+  CGPM3.addPass(
+      LambdaSCCPass([&](LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
+                        LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+        (void)AM.getResult<TestDoublyIndirectSCCAnalysis>(C, CG);
+        if (C.getName() != "(h2)")
+          return PreservedAnalyses::all();
+
+        // Build the preserved set.
+        auto PA = PreservedAnalyses::none();
+        PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+        PA.preserve<TestIndirectSCCAnalysis>();
+        PA.preserve<TestDoublyIndirectSCCAnalysis>();
+
+        // Delete the bitcast of `h3` that we added earlier.
+        auto &H2N = *C.begin();
+        auto &H2F = H2N.getFunction();
+        auto &H3F = *cast<Function>(
+            cast<BitCastInst>(H2F.begin()->begin())->getOperand(0));
+        assert(H3F.getName() == "h3" && "Wrong called function!");
+        H2F.begin()->begin()->eraseFromParent();
+        // And insert a call to `h3`.
+        (void)CallInst::Create(&H3F, {}, "", &*H2F.begin()->begin());
+
+        // Now update the call graph.
+        auto &NewC =
+            updateCGAndAnalysisManagerForFunctionPass(CG, C, H2N, AM, UR);
+        assert(&NewC != &C && "Should get a new SCC due to update!");
+        (void)&NewC;
+
+        return PA;
+      }));
+  // Now use the analysis again on each SCC and function, forcing
+  // re-computation for all of them.
+  CGPM3.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+  CGPM3.addPass(createCGSCCToFunctionPassAdaptor(
+      RequireTestIndirectFunctionAnalysisPass()));
+
+  // Create a fourth CGSCC pass manager. This will cause the module-level
+  // invalidation to occur, which will force yet another invalidation of the
+  // indirect SCC-level analysis as the module analysis it depends on gets
+  // invalidated.
+  CGSCCPassManager CGPM4(/*DebugLogging*/ true);
+  CGPM4.addPass(RequireTestDoublyIndirectSCCAnalysisPass());
+  CGPM4.addPass(createCGSCCToFunctionPassAdaptor(
+      RequireTestIndirectFunctionAnalysisPass()));
+
+  // Add a requires pass to populate the module analysis and then one of our
+  // CGSCC pipelines. Repeat for all four CGSCC pipelines.
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM2)));
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM3)));
+  MPM.addPass(RequireAnalysisPass<TestModuleAnalysis, Module>());
+  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM4)));
+  MPM.run(*M, MAM);
+
+  // We run over four SCCs the first time. But then we split an SCC into three.
+  // And then we merge those three back into one.
+  EXPECT_EQ(4 + 3 + 1, SCCAnalysisRuns);
+  // The module analysis pass should be run three times.
+  EXPECT_EQ(3, ModuleAnalysisRuns);
+  // We run over four SCCs the first time. Then over the two new ones. Then the
+  // entire module is invalidated causing a full run over all seven. Then we
+  // fold three SCCs back to one, and then run over the whole module again.
+  EXPECT_EQ(4 + 2 + 7 + 1 + 4, IndirectSCCAnalysisRuns);
+  EXPECT_EQ(4 + 2 + 7 + 1 + 4, DoublyIndirectSCCAnalysisRuns);
+
+  // First we run over all six functions. Then we re-run it over three when we
+  // split their SCCs. Then we re-run over the whole module. Then we re-run
+  // over three functions merged back into a single SCC, and then over the
+  // whole module again.
+  EXPECT_EQ(6 + 3 + 6 + 3 + 6, FunctionAnalysisRuns);
+
+  // Re-run the indirect function analysis over the entire module, then re-run
+  // it over the `(h3, h1, h2)` SCC due to invalidation. Then we re-run it
+  // over the entire module, then the three functions merged back into a
+  // single SCC, and then over the whole module again.
+  EXPECT_EQ(6 + 3 + 6 + 3 + 6, IndirectFunctionAnalysisRuns);
+}
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/CMakeLists.txt b/src/llvm-project/llvm/unittests/Analysis/CMakeLists.txt
new file mode 100644
index 0000000..563b48d
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/CMakeLists.txt
@@ -0,0 +1,35 @@
+set(LLVM_LINK_COMPONENTS
+  Analysis
+  AsmParser
+  Core
+  Support
+  )
+
+add_llvm_unittest(AnalysisTests
+  AliasAnalysisTest.cpp
+  AliasSetTrackerTest.cpp
+  BasicAliasAnalysisTest.cpp
+  BlockFrequencyInfoTest.cpp
+  BranchProbabilityInfoTest.cpp
+  CallGraphTest.cpp
+  CaptureTrackingTest.cpp
+  CFGTest.cpp
+  CGSCCPassManagerTest.cpp
+  DivergenceAnalysisTest.cpp
+  GlobalsModRefTest.cpp
+  ValueLatticeTest.cpp
+  LazyCallGraphTest.cpp
+  LoopInfoTest.cpp
+  MemoryBuiltinsTest.cpp
+  MemorySSATest.cpp
+  OrderedBasicBlockTest.cpp
+  OrderedInstructionsTest.cpp
+  PhiValuesTest.cpp
+  ProfileSummaryInfoTest.cpp
+  ScalarEvolutionTest.cpp
+  SparsePropagation.cpp
+  TargetLibraryInfoTest.cpp
+  TBAATest.cpp
+  UnrollAnalyzerTest.cpp
+  ValueTrackingTest.cpp
+  )
diff --git a/src/llvm-project/llvm/unittests/Analysis/CallGraphTest.cpp b/src/llvm-project/llvm/unittests/Analysis/CallGraphTest.cpp
new file mode 100644
index 0000000..2d4e63f
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/CallGraphTest.cpp
@@ -0,0 +1,61 @@
+//=======- CallGraphTest.cpp - Unit tests for the CG analysis -------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+
+template <typename Ty> void canSpecializeGraphTraitsIterators(Ty *G) {
+  typedef typename GraphTraits<Ty *>::NodeRef NodeRef;
+
+  auto I = GraphTraits<Ty *>::nodes_begin(G);
+  auto E = GraphTraits<Ty *>::nodes_end(G);
+  auto X = ++I;
+
+  // Should be able to iterate over all nodes of the graph.
+  static_assert(std::is_same<decltype(*I), NodeRef>::value,
+                "Node type does not match");
+  static_assert(std::is_same<decltype(*X), NodeRef>::value,
+                "Node type does not match");
+  static_assert(std::is_same<decltype(*E), NodeRef>::value,
+                "Node type does not match");
+
+  NodeRef N = GraphTraits<Ty *>::getEntryNode(G);
+
+  auto S = GraphTraits<NodeRef>::child_begin(N);
+  auto F = GraphTraits<NodeRef>::child_end(N);
+
+  // Should be able to iterate over immediate successors of a node.
+  static_assert(std::is_same<decltype(*S), NodeRef>::value,
+                "Node type does not match");
+  static_assert(std::is_same<decltype(*F), NodeRef>::value,
+                "Node type does not match");
+}
+
+TEST(CallGraphTest, GraphTraitsSpecialization) {
+  LLVMContext Context;
+  Module M("", Context);
+  CallGraph CG(M);
+
+  canSpecializeGraphTraitsIterators(&CG);
+}
+
+TEST(CallGraphTest, GraphTraitsConstSpecialization) {
+  LLVMContext Context;
+  Module M("", Context);
+  CallGraph CG(M);
+
+  canSpecializeGraphTraitsIterators(const_cast<const CallGraph *>(&CG));
+}
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/CaptureTrackingTest.cpp b/src/llvm-project/llvm/unittests/Analysis/CaptureTrackingTest.cpp
new file mode 100644
index 0000000..ee6e010
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/CaptureTrackingTest.cpp
@@ -0,0 +1,78 @@
+//=======- CaptureTrackingTest.cpp - Unit test for the Capture Tracking ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/OrderedBasicBlock.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+TEST(CaptureTracking, MaxUsesToExplore) {
+  StringRef Assembly = R"(
+    ; Function Attrs: nounwind ssp uwtable
+    declare void @doesnt_capture(i8* nocapture, i8* nocapture, i8* nocapture, 
+                                 i8* nocapture, i8* nocapture)
+
+    ; %arg has 5 uses
+    define void @test_few_uses(i8* %arg) {
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      ret void
+    }
+
+    ; %arg has 50 uses
+    define void @test_many_uses(i8* %arg) {
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      call void @doesnt_capture(i8* %arg, i8* %arg, i8* %arg, i8* %arg, i8* %arg)
+      ret void
+    }
+  )";
+
+  LLVMContext Context;
+  SMDiagnostic Error;
+  auto M = parseAssemblyString(Assembly, Error, Context);
+  ASSERT_TRUE(M) << "Bad assembly?";
+
+  auto Test = [&M](const char *FName, unsigned FalseMaxUsesLimit,
+                   unsigned TrueMaxUsesLimit) {
+    Function *F = M->getFunction(FName);
+    ASSERT_NE(F, nullptr);
+    Value *Arg = &*F->arg_begin();
+    ASSERT_NE(Arg, nullptr);
+    ASSERT_FALSE(PointerMayBeCaptured(Arg, true, true, FalseMaxUsesLimit));
+    ASSERT_TRUE(PointerMayBeCaptured(Arg, true, true, TrueMaxUsesLimit));
+
+    BasicBlock *EntryBB = &F->getEntryBlock();
+    DominatorTree DT(*F);
+    OrderedBasicBlock OBB(EntryBB);
+
+    Instruction *Ret = EntryBB->getTerminator();
+    ASSERT_TRUE(isa<ReturnInst>(Ret));
+    ASSERT_FALSE(PointerMayBeCapturedBefore(Arg, true, true, Ret, &DT, false, 
+                                            &OBB, FalseMaxUsesLimit));
+    ASSERT_TRUE(PointerMayBeCapturedBefore(Arg, true, true, Ret, &DT, false,
+                                           &OBB, TrueMaxUsesLimit));
+  };
+
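+  // A limit above the actual number of uses (5 and 50 respectively) lets the
+  // walk finish and prove the pointer is not captured; a limit below it makes
+  // it give up and conservatively report a capture.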
+  Test("test_few_uses", 6, 4);
+  Test("test_many_uses", 50, 30);
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/DivergenceAnalysisTest.cpp b/src/llvm-project/llvm/unittests/Analysis/DivergenceAnalysisTest.cpp
new file mode 100644
index 0000000..97dbd18
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/DivergenceAnalysisTest.cpp
@@ -0,0 +1,431 @@
+//===- DivergenceAnalysisTest.cpp - DivergenceAnalysis unit tests ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/SyncDependenceAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+BasicBlock *GetBlockByName(StringRef BlockName, Function &F) {
+  for (auto &BB : F) {
+    if (BB.getName() != BlockName)
+      continue;
+    return &BB;
+  }
+  return nullptr;
+}
+
+// We use this fixture to keep the supporting analyses (dominator trees, loop
+// info, sync dependence) alive for the lifetime of the DivergenceAnalysis
+// instances built on top of them.
+class DivergenceAnalysisTest : public testing::Test {
+protected:
+  LLVMContext Context;
+  Module M;
+  TargetLibraryInfoImpl TLII;
+  TargetLibraryInfo TLI;
+
+  std::unique_ptr<DominatorTree> DT;
+  std::unique_ptr<PostDominatorTree> PDT;
+  std::unique_ptr<LoopInfo> LI;
+  std::unique_ptr<SyncDependenceAnalysis> SDA;
+
+  DivergenceAnalysisTest() : M("", Context), TLII(), TLI(TLII) {}
+
+  DivergenceAnalysis buildDA(Function &F, bool IsLCSSA) {
+    DT.reset(new DominatorTree(F));
+    PDT.reset(new PostDominatorTree(F));
+    LI.reset(new LoopInfo(*DT));
+    SDA.reset(new SyncDependenceAnalysis(*DT, *PDT, *LI));
+    return DivergenceAnalysis(F, nullptr, *DT, *LI, *SDA, IsLCSSA);
+  }
+
+  void runWithDA(
+      Module &M, StringRef FuncName, bool IsLCSSA,
+      function_ref<void(Function &F, LoopInfo &LI, DivergenceAnalysis &DA)>
+          Test) {
+    auto *F = M.getFunction(FuncName);
+    ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
+    DivergenceAnalysis DA = buildDA(*F, IsLCSSA);
+    Test(*F, *LI, DA);
+  }
+};
+
+// Simple initial state test
+TEST_F(DivergenceAnalysisTest, DAInitialState) {
+  IntegerType *IntTy = IntegerType::getInt32Ty(Context);
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), {IntTy}, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+  BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
+  ReturnInst::Create(Context, nullptr, BB);
+
+  DivergenceAnalysis DA = buildDA(*F, false);
+
+  // Whole function region
+  EXPECT_EQ(DA.getRegionLoop(), nullptr);
+
+  // No divergence in initial state
+  EXPECT_FALSE(DA.hasDetectedDivergence());
+
+  // No spurious divergence
+  DA.compute();
+  EXPECT_FALSE(DA.hasDetectedDivergence());
+
+  // Detected divergence after marking
+  Argument &arg = *F->arg_begin();
+  DA.markDivergent(arg);
+
+  EXPECT_TRUE(DA.hasDetectedDivergence());
+  EXPECT_TRUE(DA.isDivergent(arg));
+
+  DA.compute();
+  EXPECT_TRUE(DA.hasDetectedDivergence());
+  EXPECT_TRUE(DA.isDivergent(arg));
+}
+
+TEST_F(DivergenceAnalysisTest, DANoLCSSA) {
+  LLVMContext C;
+  SMDiagnostic Err;
+
+  std::unique_ptr<Module> M = parseAssemblyString(
+      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
+      " "
+      "define i32 @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
+      "    local_unnamed_addr { "
+      "entry: "
+      "  br label %loop.ph "
+      " "
+      "loop.ph: "
+      "  br label %loop "
+      " "
+      "loop: "
+      "  %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
+      "  %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
+      "  %iv0.inc = add i32 %iv0, 1 "
+      "  %iv1.inc = add i32 %iv1, 3 "
+      "  %cond.cont = icmp slt i32 %iv0, %n "
+      "  br i1 %cond.cont, label %loop, label %for.end.loopexit "
+      " "
+      "for.end.loopexit: "
+      "  ret i32 %iv0 "
+      "} ",
+      Err, C);
+
+  Function *F = M->getFunction("f_1");
+  DivergenceAnalysis DA = buildDA(*F, false);
+  EXPECT_FALSE(DA.hasDetectedDivergence());
+
+  auto ItArg = F->arg_begin();
+  ItArg++;
+  auto &NArg = *ItArg;
+
+  // Seed divergence in argument %n
+  DA.markDivergent(NArg);
+
+  DA.compute();
+  EXPECT_TRUE(DA.hasDetectedDivergence());
+
+  // Verify that "ret %iv.0" is divergent
+  auto ItBlock = F->begin();
+  std::advance(ItBlock, 3);
+  auto &ExitBlock = *GetBlockByName("for.end.loopexit", *F);
+  auto &RetInst = *cast<ReturnInst>(ExitBlock.begin());
+  EXPECT_TRUE(DA.isDivergent(RetInst));
+}
+
+TEST_F(DivergenceAnalysisTest, DALCSSA) {
+  LLVMContext C;
+  SMDiagnostic Err;
+
+  std::unique_ptr<Module> M = parseAssemblyString(
+      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
+      " "
+      "define i32 @f_lcssa(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
+      "    local_unnamed_addr { "
+      "entry: "
+      "  br label %loop.ph "
+      " "
+      "loop.ph: "
+      "  br label %loop "
+      " "
+      "loop: "
+      "  %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
+      "  %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
+      "  %iv0.inc = add i32 %iv0, 1 "
+      "  %iv1.inc = add i32 %iv1, 3 "
+      "  %cond.cont = icmp slt i32 %iv0, %n "
+      "  br i1 %cond.cont, label %loop, label %for.end.loopexit "
+      " "
+      "for.end.loopexit: "
+      "  %val.ret = phi i32 [ %iv0, %loop ] "
+      "  br label %detached.return "
+      " "
+      "detached.return: "
+      "  ret i32 %val.ret "
+      "} ",
+      Err, C);
+
+  Function *F = M->getFunction("f_lcssa");
+  DivergenceAnalysis DA = buildDA(*F, true);
+  EXPECT_FALSE(DA.hasDetectedDivergence());
+
+  auto ItArg = F->arg_begin();
+  ItArg++;
+  auto &NArg = *ItArg;
+
+  // Seed divergence in argument %n
+  DA.markDivergent(NArg);
+
+  DA.compute();
+  EXPECT_TRUE(DA.hasDetectedDivergence());
+
+  // Verify that "ret %iv.0" is divergent
+  auto ItBlock = F->begin();
+  std::advance(ItBlock, 4);
+  auto &ExitBlock = *GetBlockByName("detached.return", *F);
+  auto &RetInst = *cast<ReturnInst>(ExitBlock.begin());
+  EXPECT_TRUE(DA.isDivergent(RetInst));
+}
+
+TEST_F(DivergenceAnalysisTest, DAJoinDivergence) {
+  LLVMContext C;
+  SMDiagnostic Err;
+
+  std::unique_ptr<Module> M = parseAssemblyString(
+      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
+      " "
+      "define void @f_1(i1 %a, i1 %b, i1 %c) "
+      "    local_unnamed_addr { "
+      "A: "
+      "  br i1 %a, label %B, label %C "
+      " "
+      "B: "
+      "  br i1 %b, label %C, label %D "
+      " "
+      "C: "
+      "  %c.join = phi i32 [ 0, %A ], [ 1, %B ] "
+      "  br i1 %c, label %D, label %E "
+      " "
+      "D: "
+      "  %d.join = phi i32 [ 0, %B ], [ 1, %C ] "
+      "  br label %E "
+      " "
+      "E: "
+      "  %e.join = phi i32 [ 0, %C ], [ 1, %D ] "
+      "  ret void "
+      "} "
+      " "
+      "define void @f_2(i1 %a, i1 %b, i1 %c) "
+      "    local_unnamed_addr { "
+      "A: "
+      "  br i1 %a, label %B, label %E "
+      " "
+      "B: "
+      "  br i1 %b, label %C, label %D "
+      " "
+      "C: "
+      "  br label %D "
+      " "
+      "D: "
+      "  %d.join = phi i32 [ 0, %B ], [ 1, %C ] "
+      "  br label %E "
+      " "
+      "E: "
+      "  %e.join = phi i32 [ 0, %A ], [ 1, %D ] "
+      "  ret void "
+      "} "
+      " "
+      "define void @f_3(i1 %a, i1 %b, i1 %c)"
+      "    local_unnamed_addr { "
+      "A: "
+      "  br i1 %a, label %B, label %C "
+      " "
+      "B: "
+      "  br label %C "
+      " "
+      "C: "
+      "  %c.join = phi i32 [ 0, %A ], [ 1, %B ] "
+      "  br i1 %c, label %D, label %E "
+      " "
+      "D: "
+      "  br label %E "
+      " "
+      "E: "
+      "  %e.join = phi i32 [ 0, %C ], [ 1, %D ] "
+      "  ret void "
+      "} ",
+      Err, C);
+
+  // Maps divergent conditions to the basic blocks whose Phi nodes become
+  // divergent. Blocks need to be listed in IR order.
+  using SmallBlockVec = SmallVector<const BasicBlock *, 4>;
+  using InducedDivJoinMap = std::map<const Value *, SmallBlockVec>;
+
+  // Actual function performing the checks.
+  auto CheckDivergenceFunc = [this](Function &F,
+                                    InducedDivJoinMap &ExpectedDivJoins) {
+    for (auto &ItCase : ExpectedDivJoins) {
+      auto *DivVal = ItCase.first;
+      auto DA = buildDA(F, false);
+      DA.markDivergent(*DivVal);
+      DA.compute();
+
+      // Iterator over the basic blocks expected to host divergent Phi nodes.
+      auto ItDivJoins = ItCase.second.begin();
+
+      for (auto &BB : F) {
+        auto *Phi = dyn_cast<PHINode>(BB.begin());
+        if (!Phi)
+          continue;
+
+        if (ItDivJoins != ItCase.second.end() && &BB == *ItDivJoins) {
+          EXPECT_TRUE(DA.isDivergent(*Phi));
+          // Advance to next block with expected divergent PHI node.
+          ++ItDivJoins;
+        } else {
+          EXPECT_FALSE(DA.isDivergent(*Phi));
+        }
+      }
+    }
+  };
+
+  {
+    auto *F = M->getFunction("f_1");
+    auto ItBlocks = F->begin();
+    ItBlocks++; // Skip A
+    ItBlocks++; // Skip B
+    auto *C = &*ItBlocks++;
+    auto *D = &*ItBlocks++;
+    auto *E = &*ItBlocks;
+
+    auto ItArg = F->arg_begin();
+    auto *AArg = &*ItArg++;
+    auto *BArg = &*ItArg++;
+    auto *CArg = &*ItArg;
+
+    InducedDivJoinMap DivJoins;
+    DivJoins.emplace(AArg, SmallBlockVec({C, D, E}));
+    DivJoins.emplace(BArg, SmallBlockVec({D, E}));
+    DivJoins.emplace(CArg, SmallBlockVec({E}));
+
+    CheckDivergenceFunc(*F, DivJoins);
+  }
+
+  {
+    auto *F = M->getFunction("f_2");
+    auto ItBlocks = F->begin();
+    ItBlocks++; // Skip A
+    ItBlocks++; // Skip B
+    ItBlocks++; // Skip C
+    auto *D = &*ItBlocks++;
+    auto *E = &*ItBlocks;
+
+    auto ItArg = F->arg_begin();
+    auto *AArg = &*ItArg++;
+    auto *BArg = &*ItArg++;
+    auto *CArg = &*ItArg;
+
+    InducedDivJoinMap DivJoins;
+    DivJoins.emplace(AArg, SmallBlockVec({E}));
+    DivJoins.emplace(BArg, SmallBlockVec({D}));
+    DivJoins.emplace(CArg, SmallBlockVec({}));
+
+    CheckDivergenceFunc(*F, DivJoins);
+  }
+
+  {
+    auto *F = M->getFunction("f_3");
+    auto ItBlocks = F->begin();
+    ItBlocks++; // Skip A
+    ItBlocks++; // Skip B
+    auto *C = &*ItBlocks++;
+    ItBlocks++; // Skip D
+    auto *E = &*ItBlocks;
+
+    auto ItArg = F->arg_begin();
+    auto *AArg = &*ItArg++;
+    auto *BArg = &*ItArg++;
+    auto *CArg = &*ItArg;
+
+    InducedDivJoinMap DivJoins;
+    DivJoins.emplace(AArg, SmallBlockVec({C}));
+    DivJoins.emplace(BArg, SmallBlockVec({}));
+    DivJoins.emplace(CArg, SmallBlockVec({E}));
+
+    CheckDivergenceFunc(*F, DivJoins);
+  }
+}
+
+TEST_F(DivergenceAnalysisTest, DASwitchUnreachableDefault) {
+  LLVMContext C;
+  SMDiagnostic Err;
+
+  std::unique_ptr<Module> M = parseAssemblyString(
+      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
+      " "
+      "define void @switch_unreachable_default(i32 %cond) local_unnamed_addr { "
+      "entry: "
+      "  switch i32 %cond, label %sw.default [ "
+      "    i32 0, label %sw.bb0 "
+      "    i32 1, label %sw.bb1 "
+      "  ] "
+      " "
+      "sw.bb0: "
+      "  br label %sw.epilog "
+      " "
+      "sw.bb1: "
+      "  br label %sw.epilog "
+      " "
+      "sw.default: "
+      "  unreachable "
+      " "
+      "sw.epilog: "
+      "  %div.dbl = phi double [ 0.0, %sw.bb0], [ -1.0, %sw.bb1 ] "
+      "  ret void "
+      "}",
+      Err, C);
+
+  auto *F = M->getFunction("switch_unreachable_default");
+  auto &CondArg = *F->arg_begin();
+  auto DA = buildDA(*F, false);
+
+  EXPECT_FALSE(DA.hasDetectedDivergence());
+
+  DA.markDivergent(CondArg);
+  DA.compute();
+
+  // %CondArg is still divergent.
+  EXPECT_TRUE(DA.hasDetectedDivergence());
+
+  // The phi join %div.dbl is divergent (see D52221).
+  auto &ExitBlock = *GetBlockByName("sw.epilog", *F);
+  auto &DivDblPhi = *cast<PHINode>(ExitBlock.begin());
+  EXPECT_TRUE(DA.isDivergent(DivDblPhi));
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/src/llvm-project/llvm/unittests/Analysis/GlobalsModRefTest.cpp b/src/llvm-project/llvm/unittests/Analysis/GlobalsModRefTest.cpp
new file mode 100644
index 0000000..323edc2
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/GlobalsModRefTest.cpp
@@ -0,0 +1,55 @@
+//===--- GlobalsModRefTest.cpp - GlobalsModRef unit tests -----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+TEST(GlobalsModRef, OptNone) {
+  StringRef Assembly = R"(
+    define void @f1() optnone {
+      ret void
+    }
+    define void @f2() optnone readnone {
+      ret void
+    }
+    define void @f3() optnone readonly {
+      ret void
+    }
+  )";
+
+  LLVMContext Context;
+  SMDiagnostic Error;
+  auto M = parseAssemblyString(Assembly, Error, Context);
+  ASSERT_TRUE(M) << "Bad assembly?";
+
+  const auto &funcs = M->functions();
+  auto I = funcs.begin();
+  ASSERT_NE(I, funcs.end());
+  const Function &F1 = *I;
+  ASSERT_NE(++I, funcs.end());
+  const Function &F2 = *I;
+  ASSERT_NE(++I, funcs.end());
+  const Function &F3 = *I;
+  EXPECT_EQ(++I, funcs.end());
+
+  Triple Trip(M->getTargetTriple());
+  TargetLibraryInfoImpl TLII(Trip);
+  TargetLibraryInfo TLI(TLII);
+  llvm::CallGraph CG(*M);
+
+  auto AAR = GlobalsAAResult::analyzeModule(*M, TLI, CG);
+
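+  // With optnone the bodies are not analyzed, so only the explicit
+  // readnone/readonly attributes refine the result; plain optnone stays at
+  // the conservative unknown mod/ref behavior.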
+  EXPECT_EQ(FMRB_UnknownModRefBehavior, AAR.getModRefBehavior(&F1));
+  EXPECT_EQ(FMRB_DoesNotAccessMemory, AAR.getModRefBehavior(&F2));
+  EXPECT_EQ(FMRB_OnlyReadsMemory, AAR.getModRefBehavior(&F3));
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/LazyCallGraphTest.cpp b/src/llvm-project/llvm/unittests/Analysis/LazyCallGraphTest.cpp
new file mode 100644
index 0000000..5e6dd1a
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/LazyCallGraphTest.cpp
@@ -0,0 +1,2142 @@
+//===- LazyCallGraphTest.cpp - Unit tests for the lazy CG analysis --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+#include <memory>
+
+using namespace llvm;
+
+namespace {
+
+std::unique_ptr<Module> parseAssembly(LLVMContext &Context,
+                                      const char *Assembly) {
+  SMDiagnostic Error;
+  std::unique_ptr<Module> M = parseAssemblyString(Assembly, Error, Context);
+
+  std::string ErrMsg;
+  raw_string_ostream OS(ErrMsg);
+  Error.print("", OS);
+
+  // A failure here means that the test itself is buggy.
+  if (!M)
+    report_fatal_error(OS.str().c_str());
+
+  return M;
+}
+
+/*
+   IR forming a call graph with a diamond of triangle-shaped SCCs:
+
+           d1
+          /  \
+         d3--d2
+        /     \
+       b1     c1
+     /  \    /  \
+    b3--b2  c3--c2
+         \  /
+          a1
+         /  \
+        a3--a2
+
+   All call edges go up between SCCs, and clockwise around the SCC.
+ */
+static const char DiamondOfTriangles[] =
+     "define void @a1() {\n"
+     "entry:\n"
+     "  call void @a2()\n"
+     "  call void @b2()\n"
+     "  call void @c3()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @a2() {\n"
+     "entry:\n"
+     "  call void @a3()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @a3() {\n"
+     "entry:\n"
+     "  call void @a1()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @b1() {\n"
+     "entry:\n"
+     "  call void @b2()\n"
+     "  call void @d3()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @b2() {\n"
+     "entry:\n"
+     "  call void @b3()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @b3() {\n"
+     "entry:\n"
+     "  call void @b1()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @c1() {\n"
+     "entry:\n"
+     "  call void @c2()\n"
+     "  call void @d2()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @c2() {\n"
+     "entry:\n"
+     "  call void @c3()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @c3() {\n"
+     "entry:\n"
+     "  call void @c1()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @d1() {\n"
+     "entry:\n"
+     "  call void @d2()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @d2() {\n"
+     "entry:\n"
+     "  call void @d3()\n"
+     "  ret void\n"
+     "}\n"
+     "define void @d3() {\n"
+     "entry:\n"
+     "  call void @d1()\n"
+     "  ret void\n"
+     "}\n";
+
+/*
+   IR forming a reference graph with a diamond of triangle-shaped RefSCCs
+
+           d1
+          /  \
+         d3--d2
+        /     \
+       b1     c1
+     /  \    /  \
+    b3--b2  c3--c2
+         \  /
+          a1
+         /  \
+        a3--a2
+
+   All reference edges go up between RefSCCs, and clockwise around the RefSCC.
+ */
+static const char DiamondOfTrianglesRefGraph[] =
+     "define void @a1() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @a2, void ()** %a\n"
+     "  store void ()* @b2, void ()** %a\n"
+     "  store void ()* @c3, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @a2() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @a3, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @a3() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @a1, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @b1() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @b2, void ()** %a\n"
+     "  store void ()* @d3, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @b2() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @b3, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @b3() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @b1, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @c1() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @c2, void ()** %a\n"
+     "  store void ()* @d2, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @c2() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @c3, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @c3() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @c1, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @d1() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @d2, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @d2() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @d3, void ()** %a\n"
+     "  ret void\n"
+     "}\n"
+     "define void @d3() {\n"
+     "entry:\n"
+     "  %a = alloca void ()*\n"
+     "  store void ()* @d1, void ()** %a\n"
+     "  ret void\n"
+     "}\n";
+
+static LazyCallGraph buildCG(Module &M) {
+  TargetLibraryInfoImpl TLII(Triple(M.getTargetTriple()));
+  TargetLibraryInfo TLI(TLII);
+  LazyCallGraph CG(M, TLI);
+  return CG;
+}
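+
+// A minimal usage sketch (not itself a test) of the pattern the tests below
+// follow: parse IR, build the lazy call graph, force RefSCC formation, and
+// walk the post-order RefSCC sequence.
+//
+//   LLVMContext Ctx;
+//   std::unique_ptr<Module> M = parseAssembly(Ctx, DiamondOfTriangles);
+//   LazyCallGraph CG = buildCG(*M);
+//   CG.buildRefSCCs();
+//   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+//     dbgs() << "Formed RefSCC: " << RC << "\n";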
+
+TEST(LazyCallGraphTest, BasicGraphFormation) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
+  LazyCallGraph CG = buildCG(*M);
+
+  // The order of the entry nodes should be stable w.r.t. the source order of
+  // the IR, and everything in our module is an entry node, so just directly
+  // build variables for each node.
+  auto I = CG.begin();
+  LazyCallGraph::Node &A1 = (I++)->getNode();
+  EXPECT_EQ("a1", A1.getFunction().getName());
+  LazyCallGraph::Node &A2 = (I++)->getNode();
+  EXPECT_EQ("a2", A2.getFunction().getName());
+  LazyCallGraph::Node &A3 = (I++)->getNode();
+  EXPECT_EQ("a3", A3.getFunction().getName());
+  LazyCallGraph::Node &B1 = (I++)->getNode();
+  EXPECT_EQ("b1", B1.getFunction().getName());
+  LazyCallGraph::Node &B2 = (I++)->getNode();
+  EXPECT_EQ("b2", B2.getFunction().getName());
+  LazyCallGraph::Node &B3 = (I++)->getNode();
+  EXPECT_EQ("b3", B3.getFunction().getName());
+  LazyCallGraph::Node &C1 = (I++)->getNode();
+  EXPECT_EQ("c1", C1.getFunction().getName());
+  LazyCallGraph::Node &C2 = (I++)->getNode();
+  EXPECT_EQ("c2", C2.getFunction().getName());
+  LazyCallGraph::Node &C3 = (I++)->getNode();
+  EXPECT_EQ("c3", C3.getFunction().getName());
+  LazyCallGraph::Node &D1 = (I++)->getNode();
+  EXPECT_EQ("d1", D1.getFunction().getName());
+  LazyCallGraph::Node &D2 = (I++)->getNode();
+  EXPECT_EQ("d2", D2.getFunction().getName());
+  LazyCallGraph::Node &D3 = (I++)->getNode();
+  EXPECT_EQ("d3", D3.getFunction().getName());
+  EXPECT_EQ(CG.end(), I);
+
+  // Build vectors and sort them for the rest of the assertions to make them
+  // independent of order.
+  std::vector<std::string> Nodes;
+
+  for (LazyCallGraph::Edge &E : A1.populate())
+    Nodes.push_back(E.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ("a2", Nodes[0]);
+  EXPECT_EQ("b2", Nodes[1]);
+  EXPECT_EQ("c3", Nodes[2]);
+  Nodes.clear();
+
+  A2.populate();
+  EXPECT_EQ(A2->end(), std::next(A2->begin()));
+  EXPECT_EQ("a3", A2->begin()->getFunction().getName());
+  A3.populate();
+  EXPECT_EQ(A3->end(), std::next(A3->begin()));
+  EXPECT_EQ("a1", A3->begin()->getFunction().getName());
+
+  for (LazyCallGraph::Edge &E : B1.populate())
+    Nodes.push_back(E.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ("b2", Nodes[0]);
+  EXPECT_EQ("d3", Nodes[1]);
+  Nodes.clear();
+
+  B2.populate();
+  EXPECT_EQ(B2->end(), std::next(B2->begin()));
+  EXPECT_EQ("b3", B2->begin()->getFunction().getName());
+  B3.populate();
+  EXPECT_EQ(B3->end(), std::next(B3->begin()));
+  EXPECT_EQ("b1", B3->begin()->getFunction().getName());
+
+  for (LazyCallGraph::Edge &E : C1.populate())
+    Nodes.push_back(E.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ("c2", Nodes[0]);
+  EXPECT_EQ("d2", Nodes[1]);
+  Nodes.clear();
+
+  C2.populate();
+  EXPECT_EQ(C2->end(), std::next(C2->begin()));
+  EXPECT_EQ("c3", C2->begin()->getFunction().getName());
+  C3.populate();
+  EXPECT_EQ(C3->end(), std::next(C3->begin()));
+  EXPECT_EQ("c1", C3->begin()->getFunction().getName());
+
+  D1.populate();
+  EXPECT_EQ(D1->end(), std::next(D1->begin()));
+  EXPECT_EQ("d2", D1->begin()->getFunction().getName());
+  D2.populate();
+  EXPECT_EQ(D2->end(), std::next(D2->begin()));
+  EXPECT_EQ("d3", D2->begin()->getFunction().getName());
+  D3.populate();
+  EXPECT_EQ(D3->end(), std::next(D3->begin()));
+  EXPECT_EQ("d1", D3->begin()->getFunction().getName());
+
+  // Now let's look at the RefSCCs and SCCs.
+  CG.buildRefSCCs();
+  auto J = CG.postorder_ref_scc_begin();
+
+  LazyCallGraph::RefSCC &D = *J++;
+  ASSERT_EQ(1, D.size());
+  for (LazyCallGraph::Node &N : *D.begin())
+    Nodes.push_back(N.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ(3u, Nodes.size());
+  EXPECT_EQ("d1", Nodes[0]);
+  EXPECT_EQ("d2", Nodes[1]);
+  EXPECT_EQ("d3", Nodes[2]);
+  Nodes.clear();
+  EXPECT_FALSE(D.isParentOf(D));
+  EXPECT_FALSE(D.isChildOf(D));
+  EXPECT_FALSE(D.isAncestorOf(D));
+  EXPECT_FALSE(D.isDescendantOf(D));
+  EXPECT_EQ(&D, &*CG.postorder_ref_scc_begin());
+
+  LazyCallGraph::RefSCC &C = *J++;
+  ASSERT_EQ(1, C.size());
+  for (LazyCallGraph::Node &N : *C.begin())
+    Nodes.push_back(N.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ(3u, Nodes.size());
+  EXPECT_EQ("c1", Nodes[0]);
+  EXPECT_EQ("c2", Nodes[1]);
+  EXPECT_EQ("c3", Nodes[2]);
+  Nodes.clear();
+  EXPECT_TRUE(C.isParentOf(D));
+  EXPECT_FALSE(C.isChildOf(D));
+  EXPECT_TRUE(C.isAncestorOf(D));
+  EXPECT_FALSE(C.isDescendantOf(D));
+  EXPECT_EQ(&C, &*std::next(CG.postorder_ref_scc_begin()));
+
+  LazyCallGraph::RefSCC &B = *J++;
+  ASSERT_EQ(1, B.size());
+  for (LazyCallGraph::Node &N : *B.begin())
+    Nodes.push_back(N.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ(3u, Nodes.size());
+  EXPECT_EQ("b1", Nodes[0]);
+  EXPECT_EQ("b2", Nodes[1]);
+  EXPECT_EQ("b3", Nodes[2]);
+  Nodes.clear();
+  EXPECT_TRUE(B.isParentOf(D));
+  EXPECT_FALSE(B.isChildOf(D));
+  EXPECT_TRUE(B.isAncestorOf(D));
+  EXPECT_FALSE(B.isDescendantOf(D));
+  EXPECT_FALSE(B.isAncestorOf(C));
+  EXPECT_FALSE(C.isAncestorOf(B));
+  EXPECT_EQ(&B, &*std::next(CG.postorder_ref_scc_begin(), 2));
+
+  LazyCallGraph::RefSCC &A = *J++;
+  ASSERT_EQ(1, A.size());
+  for (LazyCallGraph::Node &N : *A.begin())
+    Nodes.push_back(N.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ(3u, Nodes.size());
+  EXPECT_EQ("a1", Nodes[0]);
+  EXPECT_EQ("a2", Nodes[1]);
+  EXPECT_EQ("a3", Nodes[2]);
+  Nodes.clear();
+  EXPECT_TRUE(A.isParentOf(B));
+  EXPECT_TRUE(A.isParentOf(C));
+  EXPECT_FALSE(A.isParentOf(D));
+  EXPECT_TRUE(A.isAncestorOf(B));
+  EXPECT_TRUE(A.isAncestorOf(C));
+  EXPECT_TRUE(A.isAncestorOf(D));
+  EXPECT_EQ(&A, &*std::next(CG.postorder_ref_scc_begin(), 3));
+
+  EXPECT_EQ(CG.postorder_ref_scc_end(), J);
+  EXPECT_EQ(J, std::next(CG.postorder_ref_scc_begin(), 4));
+}
+
+static Function &lookupFunction(Module &M, StringRef Name) {
+  for (Function &F : M)
+    if (F.getName() == Name)
+      return F;
+  report_fatal_error("Couldn't find function!");
+}
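+
+// Example usage (mirroring the tests below): resolve a function by name in the
+// module, then fetch or look up its node in the graph, e.g.
+//   LazyCallGraph::Node &N = *CG.lookup(lookupFunction(*M, "a"));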
+
+TEST(LazyCallGraphTest, BasicGraphMutation) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+                                                     "entry:\n"
+                                                     "  call void @b()\n"
+                                                     "  call void @c()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @b() {\n"
+                                                     "entry:\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @c() {\n"
+                                                     "entry:\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  LazyCallGraph::Node &A = CG.get(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = CG.get(lookupFunction(*M, "b"));
+  A.populate();
+  EXPECT_EQ(2, std::distance(A->begin(), A->end()));
+  B.populate();
+  EXPECT_EQ(0, std::distance(B->begin(), B->end()));
+
+  LazyCallGraph::Node &C = CG.get(lookupFunction(*M, "c"));
+  C.populate();
+  CG.insertEdge(B, C, LazyCallGraph::Edge::Call);
+  EXPECT_EQ(1, std::distance(B->begin(), B->end()));
+  EXPECT_EQ(0, std::distance(C->begin(), C->end()));
+
+  CG.insertEdge(C, B, LazyCallGraph::Edge::Call);
+  EXPECT_EQ(1, std::distance(C->begin(), C->end()));
+  EXPECT_EQ(&B, &C->begin()->getNode());
+
+  CG.insertEdge(C, C, LazyCallGraph::Edge::Call);
+  EXPECT_EQ(2, std::distance(C->begin(), C->end()));
+  EXPECT_EQ(&B, &C->begin()->getNode());
+  EXPECT_EQ(&C, &std::next(C->begin())->getNode());
+
+  CG.removeEdge(C, B);
+  EXPECT_EQ(1, std::distance(C->begin(), C->end()));
+  EXPECT_EQ(&C, &C->begin()->getNode());
+
+  CG.removeEdge(C, C);
+  EXPECT_EQ(0, std::distance(C->begin(), C->end()));
+
+  CG.removeEdge(B, C);
+  EXPECT_EQ(0, std::distance(B->begin(), B->end()));
+}
+
+TEST(LazyCallGraphTest, InnerSCCFormation) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
+  LazyCallGraph CG = buildCG(*M);
+
+  // Now mutate the graph to connect every node into a single RefSCC to ensure
+  // that our inner SCC formation handles the rest.
+  LazyCallGraph::Node &D1 = CG.get(lookupFunction(*M, "d1"));
+  LazyCallGraph::Node &A1 = CG.get(lookupFunction(*M, "a1"));
+  A1.populate();
+  D1.populate();
+  CG.insertEdge(D1, A1, LazyCallGraph::Edge::Ref);
+
+  // Build vectors and sort them for the rest of the assertions to make them
+  // independent of order.
+  std::vector<std::string> Nodes;
+
+  // We should build a single RefSCC for the entire graph.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &RC = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  // Now walk the four SCCs which should be in post-order.
+  auto J = RC.begin();
+  LazyCallGraph::SCC &D = *J++;
+  for (LazyCallGraph::Node &N : D)
+    Nodes.push_back(N.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ(3u, Nodes.size());
+  EXPECT_EQ("d1", Nodes[0]);
+  EXPECT_EQ("d2", Nodes[1]);
+  EXPECT_EQ("d3", Nodes[2]);
+  Nodes.clear();
+
+  LazyCallGraph::SCC &B = *J++;
+  for (LazyCallGraph::Node &N : B)
+    Nodes.push_back(N.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ(3u, Nodes.size());
+  EXPECT_EQ("b1", Nodes[0]);
+  EXPECT_EQ("b2", Nodes[1]);
+  EXPECT_EQ("b3", Nodes[2]);
+  Nodes.clear();
+
+  LazyCallGraph::SCC &C = *J++;
+  for (LazyCallGraph::Node &N : C)
+    Nodes.push_back(N.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ(3u, Nodes.size());
+  EXPECT_EQ("c1", Nodes[0]);
+  EXPECT_EQ("c2", Nodes[1]);
+  EXPECT_EQ("c3", Nodes[2]);
+  Nodes.clear();
+
+  LazyCallGraph::SCC &A = *J++;
+  for (LazyCallGraph::Node &N : A)
+    Nodes.push_back(N.getFunction().getName());
+  llvm::sort(Nodes);
+  EXPECT_EQ(3u, Nodes.size());
+  EXPECT_EQ("a1", Nodes[0]);
+  EXPECT_EQ("a2", Nodes[1]);
+  EXPECT_EQ("a3", Nodes[2]);
+  Nodes.clear();
+
+  EXPECT_EQ(RC.end(), J);
+}
+
+TEST(LazyCallGraphTest, MultiArmSCC) {
+  LLVMContext Context;
+  // Two interlocking cycles. The really useful thing about this SCC is that it
+  // will require Tarjan's DFS to backtrack and finish processing all of the
+  // children of each node in the SCC. Since this involves call edges, both
+  // Tarjan implementations will have to successfully navigate the structure.
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f1() {\n"
+                                                     "entry:\n"
+                                                     "  call void @f2()\n"
+                                                     "  call void @f4()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @f2() {\n"
+                                                     "entry:\n"
+                                                     "  call void @f3()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @f3() {\n"
+                                                     "entry:\n"
+                                                     "  call void @f1()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @f4() {\n"
+                                                     "entry:\n"
+                                                     "  call void @f5()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @f5() {\n"
+                                                     "entry:\n"
+                                                     "  call void @f1()\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &RC = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  LazyCallGraph::Node &N1 = *CG.lookup(lookupFunction(*M, "f1"));
+  LazyCallGraph::Node &N2 = *CG.lookup(lookupFunction(*M, "f2"));
+  LazyCallGraph::Node &N3 = *CG.lookup(lookupFunction(*M, "f3"));
+  LazyCallGraph::Node &N4 = *CG.lookup(lookupFunction(*M, "f4"));
+  LazyCallGraph::Node &N5 = *CG.lookup(lookupFunction(*M, "f5"));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(N1));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(N2));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(N3));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(N4));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(N5));
+
+  ASSERT_EQ(1, RC.size());
+
+  LazyCallGraph::SCC &C = *RC.begin();
+  EXPECT_EQ(&C, CG.lookupSCC(N1));
+  EXPECT_EQ(&C, CG.lookupSCC(N2));
+  EXPECT_EQ(&C, CG.lookupSCC(N3));
+  EXPECT_EQ(&C, CG.lookupSCC(N4));
+  EXPECT_EQ(&C, CG.lookupSCC(N5));
+}
+
+TEST(LazyCallGraphTest, OutgoingEdgeMutation) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+                                                     "entry:\n"
+                                                     "  call void @b()\n"
+                                                     "  call void @c()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @b() {\n"
+                                                     "entry:\n"
+                                                     "  call void @d()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @c() {\n"
+                                                     "entry:\n"
+                                                     "  call void @d()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @d() {\n"
+                                                     "entry:\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+    dbgs() << "Formed RefSCC: " << RC << "\n";
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+  LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+  LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
+  LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
+  LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
+  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A);
+  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B);
+  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C);
+  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D);
+  EXPECT_TRUE(ARC.isParentOf(BRC));
+  EXPECT_TRUE(AC.isParentOf(BC));
+  EXPECT_TRUE(ARC.isParentOf(CRC));
+  EXPECT_TRUE(AC.isParentOf(CC));
+  EXPECT_FALSE(ARC.isParentOf(DRC));
+  EXPECT_FALSE(AC.isParentOf(DC));
+  EXPECT_TRUE(ARC.isAncestorOf(DRC));
+  EXPECT_TRUE(AC.isAncestorOf(DC));
+  EXPECT_FALSE(DRC.isChildOf(ARC));
+  EXPECT_FALSE(DC.isChildOf(AC));
+  EXPECT_TRUE(DRC.isDescendantOf(ARC));
+  EXPECT_TRUE(DC.isDescendantOf(AC));
+  EXPECT_TRUE(DRC.isChildOf(BRC));
+  EXPECT_TRUE(DC.isChildOf(BC));
+  EXPECT_TRUE(DRC.isChildOf(CRC));
+  EXPECT_TRUE(DC.isChildOf(CC));
+
+  EXPECT_EQ(2, std::distance(A->begin(), A->end()));
+  ARC.insertOutgoingEdge(A, D, LazyCallGraph::Edge::Call);
+  EXPECT_EQ(3, std::distance(A->begin(), A->end()));
+  const LazyCallGraph::Edge &NewE = (*A)[D];
+  EXPECT_TRUE(NewE);
+  EXPECT_TRUE(NewE.isCall());
+  EXPECT_EQ(&D, &NewE.getNode());
+
+  // Only the parent and child tests should have changed. The rest of the graph
+  // remains the same.
+  EXPECT_TRUE(ARC.isParentOf(DRC));
+  EXPECT_TRUE(AC.isParentOf(DC));
+  EXPECT_TRUE(ARC.isAncestorOf(DRC));
+  EXPECT_TRUE(AC.isAncestorOf(DC));
+  EXPECT_TRUE(DRC.isChildOf(ARC));
+  EXPECT_TRUE(DC.isChildOf(AC));
+  EXPECT_TRUE(DRC.isDescendantOf(ARC));
+  EXPECT_TRUE(DC.isDescendantOf(AC));
+  EXPECT_EQ(&AC, CG.lookupSCC(A));
+  EXPECT_EQ(&BC, CG.lookupSCC(B));
+  EXPECT_EQ(&CC, CG.lookupSCC(C));
+  EXPECT_EQ(&DC, CG.lookupSCC(D));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
+  EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
+
+  ARC.switchOutgoingEdgeToRef(A, D);
+  EXPECT_FALSE(NewE.isCall());
+
+  // Verify the reference graph remains the same but the SCC graph is updated.
+  EXPECT_TRUE(ARC.isParentOf(DRC));
+  EXPECT_FALSE(AC.isParentOf(DC));
+  EXPECT_TRUE(ARC.isAncestorOf(DRC));
+  EXPECT_TRUE(AC.isAncestorOf(DC));
+  EXPECT_TRUE(DRC.isChildOf(ARC));
+  EXPECT_FALSE(DC.isChildOf(AC));
+  EXPECT_TRUE(DRC.isDescendantOf(ARC));
+  EXPECT_TRUE(DC.isDescendantOf(AC));
+  EXPECT_EQ(&AC, CG.lookupSCC(A));
+  EXPECT_EQ(&BC, CG.lookupSCC(B));
+  EXPECT_EQ(&CC, CG.lookupSCC(C));
+  EXPECT_EQ(&DC, CG.lookupSCC(D));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
+  EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
+
+  ARC.switchOutgoingEdgeToCall(A, D);
+  EXPECT_TRUE(NewE.isCall());
+
+  // Verify the reference graph remains the same but the SCC graph is updated.
+  EXPECT_TRUE(ARC.isParentOf(DRC));
+  EXPECT_TRUE(AC.isParentOf(DC));
+  EXPECT_TRUE(ARC.isAncestorOf(DRC));
+  EXPECT_TRUE(AC.isAncestorOf(DC));
+  EXPECT_TRUE(DRC.isChildOf(ARC));
+  EXPECT_TRUE(DC.isChildOf(AC));
+  EXPECT_TRUE(DRC.isDescendantOf(ARC));
+  EXPECT_TRUE(DC.isDescendantOf(AC));
+  EXPECT_EQ(&AC, CG.lookupSCC(A));
+  EXPECT_EQ(&BC, CG.lookupSCC(B));
+  EXPECT_EQ(&CC, CG.lookupSCC(C));
+  EXPECT_EQ(&DC, CG.lookupSCC(D));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
+  EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
+
+  ARC.removeOutgoingEdge(A, D);
+  EXPECT_EQ(2, std::distance(A->begin(), A->end()));
+
+  // Now the parent and child tests fail again but the rest remains the same.
+  EXPECT_FALSE(ARC.isParentOf(DRC));
+  EXPECT_FALSE(AC.isParentOf(DC));
+  EXPECT_TRUE(ARC.isAncestorOf(DRC));
+  EXPECT_TRUE(AC.isAncestorOf(DC));
+  EXPECT_FALSE(DRC.isChildOf(ARC));
+  EXPECT_FALSE(DC.isChildOf(AC));
+  EXPECT_TRUE(DRC.isDescendantOf(ARC));
+  EXPECT_TRUE(DC.isDescendantOf(AC));
+  EXPECT_EQ(&AC, CG.lookupSCC(A));
+  EXPECT_EQ(&BC, CG.lookupSCC(B));
+  EXPECT_EQ(&CC, CG.lookupSCC(C));
+  EXPECT_EQ(&DC, CG.lookupSCC(D));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C));
+  EXPECT_EQ(&DRC, CG.lookupRefSCC(D));
+}
+
+TEST(LazyCallGraphTest, IncomingEdgeInsertion) {
+  LLVMContext Context;
+  // We want to ensure we can add edges even across complex diamond graphs, so
+  // we use the diamond of triangles graph defined above. The ascii diagram is
+  // repeated here for easy reference.
+  //
+  //         d1       |
+  //        /  \      |
+  //       d3--d2     |
+  //      /     \     |
+  //     b1     c1    |
+  //   /  \    /  \   |
+  //  b3--b2  c3--c2  |
+  //       \  /       |
+  //        a1        |
+  //       /  \       |
+  //      a3--a2      |
+  //
+  std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+    dbgs() << "Formed RefSCC: " << RC << "\n";
+
+  LazyCallGraph::Node &A1 = *CG.lookup(lookupFunction(*M, "a1"));
+  LazyCallGraph::Node &A2 = *CG.lookup(lookupFunction(*M, "a2"));
+  LazyCallGraph::Node &A3 = *CG.lookup(lookupFunction(*M, "a3"));
+  LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
+  LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
+  LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
+  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
+  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
+  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
+  LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
+  LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
+  LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
+  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A1);
+  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B1);
+  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C1);
+  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D1);
+  ASSERT_EQ(&ARC, CG.lookupRefSCC(A2));
+  ASSERT_EQ(&ARC, CG.lookupRefSCC(A3));
+  ASSERT_EQ(&BRC, CG.lookupRefSCC(B2));
+  ASSERT_EQ(&BRC, CG.lookupRefSCC(B3));
+  ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
+  ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
+  ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
+  ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
+  ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));
+
+  // Add an edge to make the graph:
+  //
+  //         d1         |
+  //        /  \        |
+  //       d3--d2---.   |
+  //      /     \    |  |
+  //     b1     c1   |  |
+  //   /  \    /  \ /   |
+  //  b3--b2  c3--c2    |
+  //       \  /         |
+  //        a1          |
+  //       /  \         |
+  //      a3--a2        |
+  auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
+  // Make sure we connected the nodes.
+  for (LazyCallGraph::Edge E : *D2) {
+    if (&E.getNode() == &D3)
+      continue;
+    EXPECT_EQ(&C2, &E.getNode());
+  }
+  // And marked the D ref-SCC as no longer valid.
+  EXPECT_EQ(1u, MergedRCs.size());
+  EXPECT_EQ(&DRC, MergedRCs[0]);
+
+  // Make sure we have the correct nodes in the SCC sets.
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A1));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A2));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A3));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B1));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B2));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B3));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));
+
+  // And that ancestry tests have been updated.
+  EXPECT_TRUE(ARC.isParentOf(CRC));
+  EXPECT_TRUE(BRC.isParentOf(CRC));
+
+  // And verify the post-order walk reflects the updated structure.
+  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
+  ASSERT_NE(I, E);
+  EXPECT_EQ(&CRC, &*I) << "Actual RefSCC: " << *I;
+  ASSERT_NE(++I, E);
+  EXPECT_EQ(&BRC, &*I) << "Actual RefSCC: " << *I;
+  ASSERT_NE(++I, E);
+  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
+  EXPECT_EQ(++I, E);
+}
+
+TEST(LazyCallGraphTest, IncomingEdgeInsertionRefGraph) {
+  LLVMContext Context;
+  // Another variation of the above test but with all the edges switched to
+  // references rather than calls.
+  std::unique_ptr<Module> M =
+      parseAssembly(Context, DiamondOfTrianglesRefGraph);
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+    dbgs() << "Formed RefSCC: " << RC << "\n";
+
+  LazyCallGraph::Node &A1 = *CG.lookup(lookupFunction(*M, "a1"));
+  LazyCallGraph::Node &A2 = *CG.lookup(lookupFunction(*M, "a2"));
+  LazyCallGraph::Node &A3 = *CG.lookup(lookupFunction(*M, "a3"));
+  LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
+  LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
+  LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
+  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
+  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
+  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
+  LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
+  LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
+  LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
+  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A1);
+  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B1);
+  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C1);
+  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D1);
+  ASSERT_EQ(&ARC, CG.lookupRefSCC(A2));
+  ASSERT_EQ(&ARC, CG.lookupRefSCC(A3));
+  ASSERT_EQ(&BRC, CG.lookupRefSCC(B2));
+  ASSERT_EQ(&BRC, CG.lookupRefSCC(B3));
+  ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
+  ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
+  ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
+  ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
+  ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));
+
+  // Add an edge to make the graph:
+  //
+  //         d1         |
+  //        /  \        |
+  //       d3--d2---.   |
+  //      /     \    |  |
+  //     b1     c1   |  |
+  //   /  \    /  \ /   |
+  //  b3--b2  c3--c2    |
+  //       \  /         |
+  //        a1          |
+  //       /  \         |
+  //      a3--a2        |
+  auto MergedRCs = CRC.insertIncomingRefEdge(D2, C2);
+  // Make sure we connected the nodes.
+  for (LazyCallGraph::Edge E : *D2) {
+    if (&E.getNode() == &D3)
+      continue;
+    EXPECT_EQ(&C2, &E.getNode());
+  }
+  // And marked the D ref-SCC as no longer valid.
+  EXPECT_EQ(1u, MergedRCs.size());
+  EXPECT_EQ(&DRC, MergedRCs[0]);
+
+  // Make sure we have the correct nodes in the SCC sets.
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A1));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A2));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A3));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B1));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B2));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B3));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(D1));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(D2));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(D3));
+
+  // And that ancestry tests have been updated.
+  EXPECT_TRUE(ARC.isParentOf(CRC));
+  EXPECT_TRUE(BRC.isParentOf(CRC));
+
+  // And verify the post-order walk reflects the updated structure.
+  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
+  ASSERT_NE(I, E);
+  EXPECT_EQ(&CRC, &*I) << "Actual RefSCC: " << *I;
+  ASSERT_NE(++I, E);
+  EXPECT_EQ(&BRC, &*I) << "Actual RefSCC: " << *I;
+  ASSERT_NE(++I, E);
+  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
+  EXPECT_EQ(++I, E);
+}
+
+TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeCallCycle) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+                                                     "entry:\n"
+                                                     "  call void @b()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @b() {\n"
+                                                     "entry:\n"
+                                                     "  call void @c()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @c() {\n"
+                                                     "entry:\n"
+                                                     "  call void @d()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @d() {\n"
+                                                     "entry:\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+    dbgs() << "Formed RefSCC: " << RC << "\n";
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+  LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+  LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
+  LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
+  LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
+  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A);
+  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B);
+  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C);
+  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D);
+
+  // Connect the top to the bottom forming a large RefSCC made up mostly of
+  // calls.
+  auto MergedRCs = ARC.insertIncomingRefEdge(D, A);
+  // Make sure we connected the nodes.
+  EXPECT_NE(D->begin(), D->end());
+  EXPECT_EQ(&A, &D->begin()->getNode());
+
+  // Check that we have the dead RCs, but ignore the order.
+  EXPECT_EQ(3u, MergedRCs.size());
+  EXPECT_NE(find(MergedRCs, &BRC), MergedRCs.end());
+  EXPECT_NE(find(MergedRCs, &CRC), MergedRCs.end());
+  EXPECT_NE(find(MergedRCs, &DRC), MergedRCs.end());
+
+  // Make sure the nodes point to the right place now.
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(C));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(D));
+
+  // Check that the SCCs are in postorder.
+  EXPECT_EQ(4, ARC.size());
+  EXPECT_EQ(&DC, &ARC[0]);
+  EXPECT_EQ(&CC, &ARC[1]);
+  EXPECT_EQ(&BC, &ARC[2]);
+  EXPECT_EQ(&AC, &ARC[3]);
+
+  // And verify the post-order walk reflects the updated structure.
+  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
+  ASSERT_NE(I, E);
+  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
+  EXPECT_EQ(++I, E);
+}
+
+TEST(LazyCallGraphTest, IncomingEdgeInsertionLargeRefCycle) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M =
+      parseAssembly(Context, "define void @a() {\n"
+                             "entry:\n"
+                             "  %p = alloca void ()*\n"
+                             "  store void ()* @b, void ()** %p\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @b() {\n"
+                             "entry:\n"
+                             "  %p = alloca void ()*\n"
+                             "  store void ()* @c, void ()** %p\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @c() {\n"
+                             "entry:\n"
+                             "  %p = alloca void ()*\n"
+                             "  store void ()* @d, void ()** %p\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @d() {\n"
+                             "entry:\n"
+                             "  ret void\n"
+                             "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+    dbgs() << "Formed RefSCC: " << RC << "\n";
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+  LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A);
+  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B);
+  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C);
+  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D);
+
+  // Connect the top to the bottom forming a large RefSCC made up just of
+  // references.
+  auto MergedRCs = ARC.insertIncomingRefEdge(D, A);
+  // Make sure we connected the nodes.
+  EXPECT_NE(D->begin(), D->end());
+  EXPECT_EQ(&A, &D->begin()->getNode());
+
+  // Check that we have the dead RCs, but ignore the order.
+  EXPECT_EQ(3u, MergedRCs.size());
+  EXPECT_NE(find(MergedRCs, &BRC), MergedRCs.end());
+  EXPECT_NE(find(MergedRCs, &CRC), MergedRCs.end());
+  EXPECT_NE(find(MergedRCs, &DRC), MergedRCs.end());
+
+  // Make sure the nodes point to the right place now.
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(C));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(D));
+
+  // And verify the post-order walk reflects the updated structure.
+  auto I = CG.postorder_ref_scc_begin(), End = CG.postorder_ref_scc_end();
+  ASSERT_NE(I, End);
+  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
+  EXPECT_EQ(++I, End);
+}
+
+TEST(LazyCallGraphTest, InlineAndDeleteFunction) {
+  LLVMContext Context;
+  // We want to ensure we can delete nodes from relatively complex graphs and
+  // so use the diamond of triangles graph defined above.
+  //
+  // The ascii diagram is repeated here for easy reference.
+  //
+  //         d1       |
+  //        /  \      |
+  //       d3--d2     |
+  //      /     \     |
+  //     b1     c1    |
+  //   /  \    /  \   |
+  //  b3--b2  c3--c2  |
+  //       \  /       |
+  //        a1        |
+  //       /  \       |
+  //      a3--a2      |
+  //
+  std::unique_ptr<Module> M = parseAssembly(Context, DiamondOfTriangles);
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
+    dbgs() << "Formed RefSCC: " << RC << "\n";
+
+  LazyCallGraph::Node &A1 = *CG.lookup(lookupFunction(*M, "a1"));
+  LazyCallGraph::Node &A2 = *CG.lookup(lookupFunction(*M, "a2"));
+  LazyCallGraph::Node &A3 = *CG.lookup(lookupFunction(*M, "a3"));
+  LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
+  LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
+  LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
+  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
+  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
+  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
+  LazyCallGraph::Node &D1 = *CG.lookup(lookupFunction(*M, "d1"));
+  LazyCallGraph::Node &D2 = *CG.lookup(lookupFunction(*M, "d2"));
+  LazyCallGraph::Node &D3 = *CG.lookup(lookupFunction(*M, "d3"));
+  LazyCallGraph::RefSCC &ARC = *CG.lookupRefSCC(A1);
+  LazyCallGraph::RefSCC &BRC = *CG.lookupRefSCC(B1);
+  LazyCallGraph::RefSCC &CRC = *CG.lookupRefSCC(C1);
+  LazyCallGraph::RefSCC &DRC = *CG.lookupRefSCC(D1);
+  ASSERT_EQ(&ARC, CG.lookupRefSCC(A2));
+  ASSERT_EQ(&ARC, CG.lookupRefSCC(A3));
+  ASSERT_EQ(&BRC, CG.lookupRefSCC(B2));
+  ASSERT_EQ(&BRC, CG.lookupRefSCC(B3));
+  ASSERT_EQ(&CRC, CG.lookupRefSCC(C2));
+  ASSERT_EQ(&CRC, CG.lookupRefSCC(C3));
+  ASSERT_EQ(&DRC, CG.lookupRefSCC(D2));
+  ASSERT_EQ(&DRC, CG.lookupRefSCC(D3));
+  ASSERT_EQ(1, std::distance(D2->begin(), D2->end()));
+
+  // Delete d2 from the graph, as if it had been inlined.
+  //
+  //         d1         |
+  //        / /         |
+  //       d3--.        |
+  //      /     \       |
+  //     b1     c1      |
+  //   /  \    /  \     |
+  //  b3--b2  c3--c2    |
+  //       \  /         |
+  //        a1          |
+  //       /  \         |
+  //      a3--a2        |
+
+  Function &D2F = D2.getFunction();
+  CallInst *C1Call = nullptr, *D1Call = nullptr;
+  for (User *U : D2F.users()) {
+    CallInst *CI = dyn_cast<CallInst>(U);
+    ASSERT_TRUE(CI) << "Expected a call: " << *U;
+    if (CI->getParent()->getParent() == &C1.getFunction()) {
+      ASSERT_EQ(nullptr, C1Call) << "Found too many C1 calls: " << *CI;
+      C1Call = CI;
+    } else if (CI->getParent()->getParent() == &D1.getFunction()) {
+      ASSERT_EQ(nullptr, D1Call) << "Found too many D1 calls: " << *CI;
+      D1Call = CI;
+    } else {
+      FAIL() << "Found an unexpected call instruction: " << *CI;
+    }
+  }
+  ASSERT_NE(C1Call, nullptr);
+  ASSERT_NE(D1Call, nullptr);
+  ASSERT_EQ(&D2F, C1Call->getCalledFunction());
+  ASSERT_EQ(&D2F, D1Call->getCalledFunction());
+  C1Call->setCalledFunction(&D3.getFunction());
+  D1Call->setCalledFunction(&D3.getFunction());
+  ASSERT_EQ(0u, D2F.getNumUses());
+
+  // Insert new edges first.
+  CRC.insertTrivialCallEdge(C1, D3);
+  DRC.insertTrivialCallEdge(D1, D3);
+
+  // Then remove the old ones.
+  LazyCallGraph::SCC &DC = *CG.lookupSCC(D2);
+  auto NewCs = DRC.switchInternalEdgeToRef(D1, D2);
+  EXPECT_EQ(&DC, CG.lookupSCC(D2));
+  EXPECT_EQ(NewCs.end(), std::next(NewCs.begin()));
+  LazyCallGraph::SCC &NewDC = *NewCs.begin();
+  EXPECT_EQ(&NewDC, CG.lookupSCC(D1));
+  EXPECT_EQ(&NewDC, CG.lookupSCC(D3));
+  auto NewRCs = DRC.removeInternalRefEdge(D1, {&D2});
+  ASSERT_EQ(2u, NewRCs.size());
+  LazyCallGraph::RefSCC &NewDRC = *NewRCs[0];
+  EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D1));
+  EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D3));
+  LazyCallGraph::RefSCC &D2RC = *NewRCs[1];
+  EXPECT_EQ(&D2RC, CG.lookupRefSCC(D2));
+  EXPECT_FALSE(NewDRC.isParentOf(D2RC));
+  EXPECT_TRUE(CRC.isParentOf(D2RC));
+  EXPECT_TRUE(CRC.isParentOf(NewDRC));
+  EXPECT_TRUE(D2RC.isParentOf(NewDRC));
+  CRC.removeOutgoingEdge(C1, D2);
+  EXPECT_FALSE(CRC.isParentOf(D2RC));
+  EXPECT_TRUE(CRC.isParentOf(NewDRC));
+  EXPECT_TRUE(D2RC.isParentOf(NewDRC));
+
+  // Now that we've updated the call graph, D2 is dead, so remove it.
+  CG.removeDeadFunction(D2F);
+
+  // Check that the graph still looks the same.
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A1));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A2));
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A3));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B1));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B2));
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B3));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C1));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C2));
+  EXPECT_EQ(&CRC, CG.lookupRefSCC(C3));
+  EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D1));
+  EXPECT_EQ(&NewDRC, CG.lookupRefSCC(D3));
+  EXPECT_TRUE(CRC.isParentOf(NewDRC));
+
+  // Verify the post-order walk hasn't changed.
+  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
+  ASSERT_NE(I, E);
+  EXPECT_EQ(&NewDRC, &*I) << "Actual RefSCC: " << *I;
+  ASSERT_NE(++I, E);
+  EXPECT_EQ(&CRC, &*I) << "Actual RefSCC: " << *I;
+  ASSERT_NE(++I, E);
+  EXPECT_EQ(&BRC, &*I) << "Actual RefSCC: " << *I;
+  ASSERT_NE(++I, E);
+  EXPECT_EQ(&ARC, &*I) << "Actual RefSCC: " << *I;
+  EXPECT_EQ(++I, E);
+}
+
+TEST(LazyCallGraphTest, InternalEdgeMutation) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+                                                     "entry:\n"
+                                                     "  call void @b()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @b() {\n"
+                                                     "entry:\n"
+                                                     "  call void @c()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @c() {\n"
+                                                     "entry:\n"
+                                                     "  call void @a()\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &RC = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+  EXPECT_EQ(1, RC.size());
+  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(A));
+  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(B));
+  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(C));
+
+  // Insert an edge from 'a' to 'c'. Nothing changes about the graph.
+  RC.insertInternalRefEdge(A, C);
+  EXPECT_EQ(2, std::distance(A->begin(), A->end()));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+  EXPECT_EQ(1, RC.size());
+  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(A));
+  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(B));
+  EXPECT_EQ(&*RC.begin(), CG.lookupSCC(C));
+
+  // Switch the call edge from 'b' to 'c' to a ref edge. This will break the
+  // call cycle and cause us to form more SCCs. The RefSCC will remain the same
+  // though.
+  auto NewCs = RC.switchInternalEdgeToRef(B, C);
+  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+  auto J = RC.begin();
+  // The SCCs must be in *post-order* which means successors before
+  // predecessors. At this point we have call edges from C to A and from A to
+  // B. The only valid postorder is B, A, C.
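+  // (B no longer has any call successors, so it comes first; A calls B, so A
+  // is next; and C calls A, so C comes last.)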
+  EXPECT_EQ(&*J++, CG.lookupSCC(B));
+  EXPECT_EQ(&*J++, CG.lookupSCC(A));
+  EXPECT_EQ(&*J++, CG.lookupSCC(C));
+  EXPECT_EQ(RC.end(), J);
+  // And the returned range must be the slice of this sequence containing new
+  // SCCs.
+  EXPECT_EQ(RC.begin(), NewCs.begin());
+  EXPECT_EQ(std::prev(RC.end()), NewCs.end());
+
+  // Test turning the ref edge from A to C into a call edge. This will form an
+  // SCC out of A and C. Since we previously had a call edge from C to A, the
+  // C SCC should be preserved and have A merged into it while the A SCC should
+  // be invalidated.
+  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+  LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
+  EXPECT_TRUE(RC.switchInternalEdgeToCall(A, C, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+    ASSERT_EQ(1u, MergedCs.size());
+    EXPECT_EQ(&AC, MergedCs[0]);
+  }));
+  EXPECT_EQ(2, CC.size());
+  EXPECT_EQ(&CC, CG.lookupSCC(A));
+  EXPECT_EQ(&CC, CG.lookupSCC(C));
+  J = RC.begin();
+  EXPECT_EQ(&*J++, CG.lookupSCC(B));
+  EXPECT_EQ(&*J++, CG.lookupSCC(C));
+  EXPECT_EQ(RC.end(), J);
+}
+
+TEST(LazyCallGraphTest, InternalEdgeRemoval) {
+  LLVMContext Context;
+  // A nice fully connected (including self-edges) RefSCC.
+  std::unique_ptr<Module> M = parseAssembly(
+      Context, "define void @a(i8** %ptr) {\n"
+               "entry:\n"
+               "  store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+               "  ret void\n"
+               "}\n"
+               "define void @b(i8** %ptr) {\n"
+               "entry:\n"
+               "  store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+               "  ret void\n"
+               "}\n"
+               "define void @c(i8** %ptr) {\n"
+               "entry:\n"
+               "  store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+               "  ret void\n"
+               "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
+  LazyCallGraph::RefSCC &RC = *I;
+  EXPECT_EQ(E, std::next(I));
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+
+  // Remove the edge from b -> a, which should leave the 3 functions still in
+  // a single connected component because of a -> b -> c -> a.
+  SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
+      RC.removeInternalRefEdge(B, {&A});
+  EXPECT_EQ(0u, NewRCs.size());
+  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+  auto J = CG.postorder_ref_scc_begin();
+  EXPECT_EQ(I, J);
+  EXPECT_EQ(&RC, &*J);
+  EXPECT_EQ(E, std::next(J));
+
+  // Increment I before we actually mutate the structure so that it remains
+  // a valid iterator.
+  ++I;
+
+  // Remove the edge from c -> a, which should leave 'a' in the original RefSCC
+  // and form a new RefSCC for 'b' and 'c'.
+  NewRCs = RC.removeInternalRefEdge(C, {&A});
+  ASSERT_EQ(2u, NewRCs.size());
+  LazyCallGraph::RefSCC &BCRC = *NewRCs[0];
+  LazyCallGraph::RefSCC &ARC = *NewRCs[1];
+  EXPECT_EQ(&ARC, CG.lookupRefSCC(A));
+  EXPECT_EQ(1, std::distance(ARC.begin(), ARC.end()));
+  EXPECT_EQ(&BCRC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&BCRC, CG.lookupRefSCC(C));
+  J = CG.postorder_ref_scc_begin();
+  EXPECT_NE(I, J);
+  EXPECT_EQ(&BCRC, &*J);
+  ++J;
+  EXPECT_NE(I, J);
+  EXPECT_EQ(&ARC, &*J);
+  ++J;
+  EXPECT_EQ(I, J);
+  EXPECT_EQ(E, J);
+}
+
+TEST(LazyCallGraphTest, InternalMultiEdgeRemoval) {
+  LLVMContext Context;
+  // A nice fully connected (including self-edges) RefSCC.
+  std::unique_ptr<Module> M = parseAssembly(
+      Context, "define void @a(i8** %ptr) {\n"
+               "entry:\n"
+               "  store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+               "  ret void\n"
+               "}\n"
+               "define void @b(i8** %ptr) {\n"
+               "entry:\n"
+               "  store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+               "  ret void\n"
+               "}\n"
+               "define void @c(i8** %ptr) {\n"
+               "entry:\n"
+               "  store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+               "  store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+               "  ret void\n"
+               "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
+  LazyCallGraph::RefSCC &RC = *I;
+  EXPECT_EQ(E, std::next(I));
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(B));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(C));
+
+  // Increment I before we actually mutate the structure so that it remains
+  // a valid iterator.
+  ++I;
+
+  // Remove the edges from b -> a and b -> c, leaving b in its own RefSCC.
+  SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
+      RC.removeInternalRefEdge(B, {&A, &C});
+
+  ASSERT_EQ(2u, NewRCs.size());
+  LazyCallGraph::RefSCC &BRC = *NewRCs[0];
+  LazyCallGraph::RefSCC &ACRC = *NewRCs[1];
+  EXPECT_EQ(&BRC, CG.lookupRefSCC(B));
+  EXPECT_EQ(1, std::distance(BRC.begin(), BRC.end()));
+  EXPECT_EQ(&ACRC, CG.lookupRefSCC(A));
+  EXPECT_EQ(&ACRC, CG.lookupRefSCC(C));
+  auto J = CG.postorder_ref_scc_begin();
+  EXPECT_NE(I, J);
+  EXPECT_EQ(&BRC, &*J);
+  ++J;
+  EXPECT_NE(I, J);
+  EXPECT_EQ(&ACRC, &*J);
+  ++J;
+  EXPECT_EQ(I, J);
+  EXPECT_EQ(E, J);
+}
+
+TEST(LazyCallGraphTest, InternalNoOpEdgeRemoval) {
+  LLVMContext Context;
+  // A graph with a single cycle formed both from call and reference edges
+  // which makes the reference edges trivial to delete. The graph looks like:
+  //
+  // Reference edges: a -> b -> c -> a
+  //      Call edges: a -> c -> b -> a
+  std::unique_ptr<Module> M = parseAssembly(
+      Context, "define void @a(i8** %ptr) {\n"
+               "entry:\n"
+               "  call void @b(i8** %ptr)\n"
+               "  store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+               "  ret void\n"
+               "}\n"
+               "define void @b(i8** %ptr) {\n"
+               "entry:\n"
+               "  store i8* bitcast (void(i8**)* @a to i8*), i8** %ptr\n"
+               "  call void @c(i8** %ptr)\n"
+               "  ret void\n"
+               "}\n"
+               "define void @c(i8** %ptr) {\n"
+               "entry:\n"
+               "  call void @a(i8** %ptr)\n"
+               "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+               "  ret void\n"
+               "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin(), E = CG.postorder_ref_scc_end();
+  LazyCallGraph::RefSCC &RC = *I;
+  EXPECT_EQ(E, std::next(I));
+
+  LazyCallGraph::SCC &C = *RC.begin();
+  EXPECT_EQ(RC.end(), std::next(RC.begin()));
+
+  LazyCallGraph::Node &AN = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &BN = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &CN = *CG.lookup(lookupFunction(*M, "c"));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(AN));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(BN));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(CN));
+  EXPECT_EQ(&C, CG.lookupSCC(AN));
+  EXPECT_EQ(&C, CG.lookupSCC(BN));
+  EXPECT_EQ(&C, CG.lookupSCC(CN));
+
+  // Remove the edge from a -> c which doesn't change anything.
+  SmallVector<LazyCallGraph::RefSCC *, 1> NewRCs =
+      RC.removeInternalRefEdge(AN, {&CN});
+  EXPECT_EQ(0u, NewRCs.size());
+  EXPECT_EQ(&RC, CG.lookupRefSCC(AN));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(BN));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(CN));
+  EXPECT_EQ(&C, CG.lookupSCC(AN));
+  EXPECT_EQ(&C, CG.lookupSCC(BN));
+  EXPECT_EQ(&C, CG.lookupSCC(CN));
+  auto J = CG.postorder_ref_scc_begin();
+  EXPECT_EQ(I, J);
+  EXPECT_EQ(&RC, &*J);
+  EXPECT_EQ(E, std::next(J));
+
+  // Remove the edge from b -> a and c -> b; again this doesn't change
+  // anything.
+  NewRCs = RC.removeInternalRefEdge(BN, {&AN});
+  NewRCs = RC.removeInternalRefEdge(CN, {&BN});
+  EXPECT_EQ(0u, NewRCs.size());
+  EXPECT_EQ(&RC, CG.lookupRefSCC(AN));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(BN));
+  EXPECT_EQ(&RC, CG.lookupRefSCC(CN));
+  EXPECT_EQ(&C, CG.lookupSCC(AN));
+  EXPECT_EQ(&C, CG.lookupSCC(BN));
+  EXPECT_EQ(&C, CG.lookupSCC(CN));
+  J = CG.postorder_ref_scc_begin();
+  EXPECT_EQ(I, J);
+  EXPECT_EQ(&RC, &*J);
+  EXPECT_EQ(E, std::next(J));
+}
+
+TEST(LazyCallGraphTest, InternalCallEdgeToRef) {
+  LLVMContext Context;
+  // A nice fully connected (including self-edges) SCC (and RefSCC)
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @a() {\n"
+                                                     "entry:\n"
+                                                     "  call void @a()\n"
+                                                     "  call void @b()\n"
+                                                     "  call void @c()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @b() {\n"
+                                                     "entry:\n"
+                                                     "  call void @a()\n"
+                                                     "  call void @b()\n"
+                                                     "  call void @c()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @c() {\n"
+                                                     "entry:\n"
+                                                     "  call void @a()\n"
+                                                     "  call void @b()\n"
+                                                     "  call void @c()\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &RC = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  EXPECT_EQ(1, RC.size());
+  LazyCallGraph::SCC &AC = *RC.begin();
+
+  LazyCallGraph::Node &AN = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &BN = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &CN = *CG.lookup(lookupFunction(*M, "c"));
+  EXPECT_EQ(&AC, CG.lookupSCC(AN));
+  EXPECT_EQ(&AC, CG.lookupSCC(BN));
+  EXPECT_EQ(&AC, CG.lookupSCC(CN));
+
+  // Switch the call edge from b -> a to a ref edge, which should leave the
+  // 3 functions still in a single connected component because of a -> b ->
+  // c -> a.
+  auto NewCs = RC.switchInternalEdgeToRef(BN, AN);
+  EXPECT_EQ(NewCs.begin(), NewCs.end());
+  EXPECT_EQ(1, RC.size());
+  EXPECT_EQ(&AC, CG.lookupSCC(AN));
+  EXPECT_EQ(&AC, CG.lookupSCC(BN));
+  EXPECT_EQ(&AC, CG.lookupSCC(CN));
+
+  // Switch the call edge from c -> a to a ref edge, which should leave 'a'
+  // in the original SCC and form a new SCC for 'b' and 'c'.
+  NewCs = RC.switchInternalEdgeToRef(CN, AN);
+  EXPECT_EQ(1, std::distance(NewCs.begin(), NewCs.end()));
+  EXPECT_EQ(2, RC.size());
+  EXPECT_EQ(&AC, CG.lookupSCC(AN));
+  LazyCallGraph::SCC &BC = *CG.lookupSCC(BN);
+  EXPECT_NE(&BC, &AC);
+  EXPECT_EQ(&BC, CG.lookupSCC(CN));
+  auto J = RC.find(AC);
+  EXPECT_EQ(&AC, &*J);
+  --J;
+  EXPECT_EQ(&BC, &*J);
+  EXPECT_EQ(RC.begin(), J);
+  EXPECT_EQ(J, NewCs.begin());
+
+  // Switch the call edge from c -> b to a ref edge, which should leave 'b'
+  // in the original SCC and form a new SCC for 'c'. It shouldn't change 'a's
+  // SCC.
+  NewCs = RC.switchInternalEdgeToRef(CN, BN);
+  EXPECT_EQ(1, std::distance(NewCs.begin(), NewCs.end()));
+  EXPECT_EQ(3, RC.size());
+  EXPECT_EQ(&AC, CG.lookupSCC(AN));
+  EXPECT_EQ(&BC, CG.lookupSCC(BN));
+  LazyCallGraph::SCC &CC = *CG.lookupSCC(CN);
+  EXPECT_NE(&CC, &AC);
+  EXPECT_NE(&CC, &BC);
+  J = RC.find(AC);
+  EXPECT_EQ(&AC, &*J);
+  --J;
+  EXPECT_EQ(&BC, &*J);
+  --J;
+  EXPECT_EQ(&CC, &*J);
+  EXPECT_EQ(RC.begin(), J);
+  EXPECT_EQ(J, NewCs.begin());
+}
+
+TEST(LazyCallGraphTest, InternalRefEdgeToCall) {
+  LLVMContext Context;
+  // Basic tests for making a ref edge a call. This hits the basics of the
+  // process only.
+  std::unique_ptr<Module> M =
+      parseAssembly(Context, "define void @a() {\n"
+                             "entry:\n"
+                             "  call void @b()\n"
+                             "  call void @c()\n"
+                             "  store void()* @d, void()** undef\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @b() {\n"
+                             "entry:\n"
+                             "  store void()* @c, void()** undef\n"
+                             "  call void @d()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @c() {\n"
+                             "entry:\n"
+                             "  store void()* @b, void()** undef\n"
+                             "  call void @d()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @d() {\n"
+                             "entry:\n"
+                             "  store void()* @a, void()** undef\n"
+                             "  ret void\n"
+                             "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &RC = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+  LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+  LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
+  LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
+  LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
+
+  // Check the initial post-order. Note that B and C could be flipped here (and
+  // in our mutation) without changing the nature of this test.
+  ASSERT_EQ(4, RC.size());
+  EXPECT_EQ(&DC, &RC[0]);
+  EXPECT_EQ(&BC, &RC[1]);
+  EXPECT_EQ(&CC, &RC[2]);
+  EXPECT_EQ(&AC, &RC[3]);
+
+  // Switch the ref edge from A -> D to a call edge. This should have no
+  // effect as it is already in postorder and no new cycles are formed.
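+  // switchInternalEdgeToCall returns true only when the change merges SCCs.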
+  EXPECT_FALSE(RC.switchInternalEdgeToCall(A, D));
+  ASSERT_EQ(4, RC.size());
+  EXPECT_EQ(&DC, &RC[0]);
+  EXPECT_EQ(&BC, &RC[1]);
+  EXPECT_EQ(&CC, &RC[2]);
+  EXPECT_EQ(&AC, &RC[3]);
+
+  // Switch B -> C to a call edge. This doesn't form any new cycles but does
+  // require reordering the SCCs.
+  EXPECT_FALSE(RC.switchInternalEdgeToCall(B, C));
+  ASSERT_EQ(4, RC.size());
+  EXPECT_EQ(&DC, &RC[0]);
+  EXPECT_EQ(&CC, &RC[1]);
+  EXPECT_EQ(&BC, &RC[2]);
+  EXPECT_EQ(&AC, &RC[3]);
+
+  // Switch C -> B to a call edge. This forms a cycle and forces merging SCCs.
+  EXPECT_TRUE(RC.switchInternalEdgeToCall(C, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+    ASSERT_EQ(1u, MergedCs.size());
+    EXPECT_EQ(&CC, MergedCs[0]);
+  }));
+  ASSERT_EQ(3, RC.size());
+  EXPECT_EQ(&DC, &RC[0]);
+  EXPECT_EQ(&BC, &RC[1]);
+  EXPECT_EQ(&AC, &RC[2]);
+  EXPECT_EQ(2, BC.size());
+  EXPECT_EQ(&BC, CG.lookupSCC(B));
+  EXPECT_EQ(&BC, CG.lookupSCC(C));
+}
+
+TEST(LazyCallGraphTest, InternalRefEdgeToCallNoCycleInterleaved) {
+  LLVMContext Context;
+  // Test for having a post-order prior to changing a ref edge to a call edge
+  // with SCCs connecting to the source and connecting to the target, but not
+  // connecting to both, interleaved between the source and target. This
+  // ensures we correctly partition the range rather than simply moving one or
+  // the other.
+  std::unique_ptr<Module> M =
+      parseAssembly(Context, "define void @a() {\n"
+                             "entry:\n"
+                             "  call void @b1()\n"
+                             "  call void @c1()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @b1() {\n"
+                             "entry:\n"
+                             "  call void @c1()\n"
+                             "  call void @b2()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @c1() {\n"
+                             "entry:\n"
+                             "  call void @b2()\n"
+                             "  call void @c2()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @b2() {\n"
+                             "entry:\n"
+                             "  call void @c2()\n"
+                             "  call void @b3()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @c2() {\n"
+                             "entry:\n"
+                             "  call void @b3()\n"
+                             "  call void @c3()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @b3() {\n"
+                             "entry:\n"
+                             "  call void @c3()\n"
+                             "  call void @d()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @c3() {\n"
+                             "entry:\n"
+                             "  store void()* @b1, void()** undef\n"
+                             "  call void @d()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @d() {\n"
+                             "entry:\n"
+                             "  store void()* @a, void()** undef\n"
+                             "  ret void\n"
+                             "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &RC = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B1 = *CG.lookup(lookupFunction(*M, "b1"));
+  LazyCallGraph::Node &B2 = *CG.lookup(lookupFunction(*M, "b2"));
+  LazyCallGraph::Node &B3 = *CG.lookup(lookupFunction(*M, "b3"));
+  LazyCallGraph::Node &C1 = *CG.lookup(lookupFunction(*M, "c1"));
+  LazyCallGraph::Node &C2 = *CG.lookup(lookupFunction(*M, "c2"));
+  LazyCallGraph::Node &C3 = *CG.lookup(lookupFunction(*M, "c3"));
+  LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+  LazyCallGraph::SCC &B1C = *CG.lookupSCC(B1);
+  LazyCallGraph::SCC &B2C = *CG.lookupSCC(B2);
+  LazyCallGraph::SCC &B3C = *CG.lookupSCC(B3);
+  LazyCallGraph::SCC &C1C = *CG.lookupSCC(C1);
+  LazyCallGraph::SCC &C2C = *CG.lookupSCC(C2);
+  LazyCallGraph::SCC &C3C = *CG.lookupSCC(C3);
+  LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
+
+  // Several call edges are initially present to force a particular post-order.
+  // Remove them now, leaving an interleaved post-order pattern.
+  RC.switchTrivialInternalEdgeToRef(B3, C3);
+  RC.switchTrivialInternalEdgeToRef(C2, B3);
+  RC.switchTrivialInternalEdgeToRef(B2, C2);
+  RC.switchTrivialInternalEdgeToRef(C1, B2);
+  RC.switchTrivialInternalEdgeToRef(B1, C1);
+
+  // Check the initial post-order. We ensure this order with the extra edges
+  // that are nuked above.
+  ASSERT_EQ(8, RC.size());
+  EXPECT_EQ(&DC, &RC[0]);
+  EXPECT_EQ(&C3C, &RC[1]);
+  EXPECT_EQ(&B3C, &RC[2]);
+  EXPECT_EQ(&C2C, &RC[3]);
+  EXPECT_EQ(&B2C, &RC[4]);
+  EXPECT_EQ(&C1C, &RC[5]);
+  EXPECT_EQ(&B1C, &RC[6]);
+  EXPECT_EQ(&AC, &RC[7]);
+
+  // Switch C3 -> B1 to a call edge. This doesn't form any new cycles but does
+  // require reordering the SCCs in the face of tricky internal node
+  // structures.
+  EXPECT_FALSE(RC.switchInternalEdgeToCall(C3, B1));
+  ASSERT_EQ(8, RC.size());
+  EXPECT_EQ(&DC, &RC[0]);
+  EXPECT_EQ(&B3C, &RC[1]);
+  EXPECT_EQ(&B2C, &RC[2]);
+  EXPECT_EQ(&B1C, &RC[3]);
+  EXPECT_EQ(&C3C, &RC[4]);
+  EXPECT_EQ(&C2C, &RC[5]);
+  EXPECT_EQ(&C1C, &RC[6]);
+  EXPECT_EQ(&AC, &RC[7]);
+}
+
+TEST(LazyCallGraphTest, InternalRefEdgeToCallBothPartitionAndMerge) {
+  LLVMContext Context;
+  // Test for having a postorder where between the source and target are all
+  // three kinds of other SCCs:
+  // 1) One connected to the target only that has to be shifted below the
+  //    source.
+  // 2) One connected to the source only that has to be shifted below the
+  //    target.
+  // 3) One connected to both source and target that has to remain and get
+  //    merged away.
+  //
+  // To achieve this we construct a heavily connected graph to force
+  // a particular post-order. Then we remove the forcing edges and connect
+  // a cycle.
+  //
+  // Diagram for the graph we want on the left and the graph we use to force
+  // the ordering on the right. Edges point down or right.
+  //
+  //   A    |    A    |
+  //  / \   |   / \   |
+  // B   E  |  B   \  |
+  // |\  |  |  |\  |  |
+  // | D |  |  C-D-E  |
+  // |  \|  |  |  \|  |
+  // C   F  |  \   F  |
+  //  \ /   |   \ /   |
+  //   G    |    G    |
+  //
+  // And we form a cycle by connecting F to B.
+  std::unique_ptr<Module> M =
+      parseAssembly(Context, "define void @a() {\n"
+                             "entry:\n"
+                             "  call void @b()\n"
+                             "  call void @e()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @b() {\n"
+                             "entry:\n"
+                             "  call void @c()\n"
+                             "  call void @d()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @c() {\n"
+                             "entry:\n"
+                             "  call void @d()\n"
+                             "  call void @g()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @d() {\n"
+                             "entry:\n"
+                             "  call void @e()\n"
+                             "  call void @f()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @e() {\n"
+                             "entry:\n"
+                             "  call void @f()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @f() {\n"
+                             "entry:\n"
+                             "  store void()* @b, void()** undef\n"
+                             "  call void @g()\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @g() {\n"
+                             "entry:\n"
+                             "  store void()* @a, void()** undef\n"
+                             "  ret void\n"
+                             "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &RC = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  LazyCallGraph::Node &A = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &B = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &C = *CG.lookup(lookupFunction(*M, "c"));
+  LazyCallGraph::Node &D = *CG.lookup(lookupFunction(*M, "d"));
+  LazyCallGraph::Node &E = *CG.lookup(lookupFunction(*M, "e"));
+  LazyCallGraph::Node &F = *CG.lookup(lookupFunction(*M, "f"));
+  LazyCallGraph::Node &G = *CG.lookup(lookupFunction(*M, "g"));
+  LazyCallGraph::SCC &AC = *CG.lookupSCC(A);
+  LazyCallGraph::SCC &BC = *CG.lookupSCC(B);
+  LazyCallGraph::SCC &CC = *CG.lookupSCC(C);
+  LazyCallGraph::SCC &DC = *CG.lookupSCC(D);
+  LazyCallGraph::SCC &EC = *CG.lookupSCC(E);
+  LazyCallGraph::SCC &FC = *CG.lookupSCC(F);
+  LazyCallGraph::SCC &GC = *CG.lookupSCC(G);
+
+  // Remove the extra edges that were used to force a particular post-order.
+  RC.switchTrivialInternalEdgeToRef(C, D);
+  RC.switchTrivialInternalEdgeToRef(D, E);
+
+  // Check the initial post-order. We ensure this order with the extra edges
+  // that are nuked above.
+  ASSERT_EQ(7, RC.size());
+  EXPECT_EQ(&GC, &RC[0]);
+  EXPECT_EQ(&FC, &RC[1]);
+  EXPECT_EQ(&EC, &RC[2]);
+  EXPECT_EQ(&DC, &RC[3]);
+  EXPECT_EQ(&CC, &RC[4]);
+  EXPECT_EQ(&BC, &RC[5]);
+  EXPECT_EQ(&AC, &RC[6]);
+
+  // Switch F -> B to a call edge. This merges B, D, and F into a single SCC,
+  // and has to place the C and E SCCs on either side of it:
+  //   A          A    |
+  //  / \        / \   |
+  // B   E      |   E  |
+  // |\  |       \ /   |
+  // | D |  ->    B    |
+  // |  \|       / \   |
+  // C   F      C   |  |
+  //  \ /        \ /   |
+  //   G          G    |
+  EXPECT_TRUE(RC.switchInternalEdgeToCall(
+      F, B, [&](ArrayRef<LazyCallGraph::SCC *> MergedCs) {
+        ASSERT_EQ(2u, MergedCs.size());
+        EXPECT_EQ(&FC, MergedCs[0]);
+        EXPECT_EQ(&DC, MergedCs[1]);
+      }));
+  EXPECT_EQ(3, BC.size());
+
+  // And make sure the postorder was updated.
+  ASSERT_EQ(5, RC.size());
+  EXPECT_EQ(&GC, &RC[0]);
+  EXPECT_EQ(&CC, &RC[1]);
+  EXPECT_EQ(&BC, &RC[2]);
+  EXPECT_EQ(&EC, &RC[3]);
+  EXPECT_EQ(&AC, &RC[4]);
+}
+
+// Test for IR containing constants using blockaddress constant expressions.
+// These are truly unique constructs: constant expressions with non-constant
+// operands.
+TEST(LazyCallGraphTest, HandleBlockAddress) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M =
+      parseAssembly(Context, "define void @f() {\n"
+                             "entry:\n"
+                             "  ret void\n"
+                             "bb:\n"
+                             "  unreachable\n"
+                             "}\n"
+                             "define void @g(i8** %ptr) {\n"
+                             "entry:\n"
+                             "  store i8* blockaddress(@f, %bb), i8** %ptr\n"
+                             "  ret void\n"
+                             "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &FRC = *I++;
+  LazyCallGraph::RefSCC &GRC = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  LazyCallGraph::Node &F = *CG.lookup(lookupFunction(*M, "f"));
+  LazyCallGraph::Node &G = *CG.lookup(lookupFunction(*M, "g"));
+  EXPECT_EQ(&FRC, CG.lookupRefSCC(F));
+  EXPECT_EQ(&GRC, CG.lookupRefSCC(G));
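+  // The blockaddress reference from @g to @f is a ref edge, so @g's RefSCC is
+  // a parent of @f's.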
+  EXPECT_TRUE(GRC.isParentOf(FRC));
+}
+
+TEST(LazyCallGraphTest, ReplaceNodeFunction) {
+  LLVMContext Context;
+  // A graph with several different kinds of edges pointing at a particular
+  // function.
+  std::unique_ptr<Module> M =
+      parseAssembly(Context,
+                    "define void @a(i8** %ptr) {\n"
+                    "entry:\n"
+                    "  store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+                    "  ret void\n"
+                    "}\n"
+                    "define void @b(i8** %ptr) {\n"
+                    "entry:\n"
+                    "  store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+                    "  store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+                    "  call void @d(i8** %ptr)"
+                    "  ret void\n"
+                    "}\n"
+                    "define void @c(i8** %ptr) {\n"
+                    "entry:\n"
+                    "  call void @d(i8** %ptr)"
+                    "  call void @d(i8** %ptr)"
+                    "  store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+                    "  ret void\n"
+                    "}\n"
+                    "define void @d(i8** %ptr) {\n"
+                    "entry:\n"
+                    "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+                    "  call void @c(i8** %ptr)"
+                    "  call void @d(i8** %ptr)"
+                    "  store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+                    "  ret void\n"
+                    "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &RC1 = *I++;
+  LazyCallGraph::RefSCC &RC2 = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  ASSERT_EQ(2, RC1.size());
+  LazyCallGraph::SCC &C1 = RC1[0];
+  LazyCallGraph::SCC &C2 = RC1[1];
+
+  LazyCallGraph::Node &AN = *CG.lookup(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &BN = *CG.lookup(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &CN = *CG.lookup(lookupFunction(*M, "c"));
+  LazyCallGraph::Node &DN = *CG.lookup(lookupFunction(*M, "d"));
+  EXPECT_EQ(&C1, CG.lookupSCC(DN));
+  EXPECT_EQ(&C1, CG.lookupSCC(CN));
+  EXPECT_EQ(&C2, CG.lookupSCC(BN));
+  EXPECT_EQ(&RC1, CG.lookupRefSCC(DN));
+  EXPECT_EQ(&RC1, CG.lookupRefSCC(CN));
+  EXPECT_EQ(&RC1, CG.lookupRefSCC(BN));
+  EXPECT_EQ(&RC2, CG.lookupRefSCC(AN));
+
+  // Now we need to build a new function 'e' with the same signature as 'd'.
+  Function &D = DN.getFunction();
+  Function &E = *Function::Create(D.getFunctionType(), D.getLinkage(), "e");
+  D.getParent()->getFunctionList().insert(D.getIterator(), &E);
+
+  // Change each use of 'd' to use 'e'. This is particularly easy as they have
+  // the same type.
+  D.replaceAllUsesWith(&E);
+
+  // Splice the body of the old function into the new one.
+  E.getBasicBlockList().splice(E.begin(), D.getBasicBlockList());
+  // And fix up the one argument.
+  D.arg_begin()->replaceAllUsesWith(&*E.arg_begin());
+  E.arg_begin()->takeName(&*D.arg_begin());
+
+  // Now replace the function in the graph.
+  RC1.replaceNodeFunction(DN, E);
+
+  EXPECT_EQ(&E, &DN.getFunction());
+  EXPECT_EQ(&DN, &(*CN)[DN].getNode());
+  EXPECT_EQ(&DN, &(*BN)[DN].getNode());
+}
+
+TEST(LazyCallGraphTest, RemoveFunctionWithSpurriousRef) {
+  LLVMContext Context;
+  // A graph with a couple of RefSCCs.
+  std::unique_ptr<Module> M =
+      parseAssembly(Context,
+                    "define void @a(i8** %ptr) {\n"
+                    "entry:\n"
+                    "  store i8* bitcast (void(i8**)* @d to i8*), i8** %ptr\n"
+                    "  ret void\n"
+                    "}\n"
+                    "define void @b(i8** %ptr) {\n"
+                    "entry:\n"
+                    "  store i8* bitcast (void(i8**)* @c to i8*), i8** %ptr\n"
+                    "  ret void\n"
+                    "}\n"
+                    "define void @c(i8** %ptr) {\n"
+                    "entry:\n"
+                    "  call void @d(i8** %ptr)"
+                    "  ret void\n"
+                    "}\n"
+                    "define void @d(i8** %ptr) {\n"
+                    "entry:\n"
+                    "  call void @c(i8** %ptr)"
+                    "  store i8* bitcast (void(i8**)* @b to i8*), i8** %ptr\n"
+                    "  ret void\n"
+                    "}\n"
+                    "define void @dead() {\n"
+                    "entry:\n"
+                    "  ret void\n"
+                    "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  // Insert spurious ref edges.
+  LazyCallGraph::Node &AN = CG.get(lookupFunction(*M, "a"));
+  LazyCallGraph::Node &BN = CG.get(lookupFunction(*M, "b"));
+  LazyCallGraph::Node &CN = CG.get(lookupFunction(*M, "c"));
+  LazyCallGraph::Node &DN = CG.get(lookupFunction(*M, "d"));
+  LazyCallGraph::Node &DeadN = CG.get(lookupFunction(*M, "dead"));
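+  // Populate each node first so its edge list exists before we insert the
+  // extra ref edges.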
+  AN.populate();
+  BN.populate();
+  CN.populate();
+  DN.populate();
+  DeadN.populate();
+  CG.insertEdge(AN, DeadN, LazyCallGraph::Edge::Ref);
+  CG.insertEdge(BN, DeadN, LazyCallGraph::Edge::Ref);
+  CG.insertEdge(CN, DeadN, LazyCallGraph::Edge::Ref);
+  CG.insertEdge(DN, DeadN, LazyCallGraph::Edge::Ref);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC &DeadRC = *I++;
+  LazyCallGraph::RefSCC &RC1 = *I++;
+  LazyCallGraph::RefSCC &RC2 = *I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  ASSERT_EQ(2, RC1.size());
+  LazyCallGraph::SCC &C1 = RC1[0];
+  LazyCallGraph::SCC &C2 = RC1[1];
+
+  EXPECT_EQ(&DeadRC, CG.lookupRefSCC(DeadN));
+  EXPECT_EQ(&C1, CG.lookupSCC(DN));
+  EXPECT_EQ(&C1, CG.lookupSCC(CN));
+  EXPECT_EQ(&C2, CG.lookupSCC(BN));
+  EXPECT_EQ(&RC1, CG.lookupRefSCC(DN));
+  EXPECT_EQ(&RC1, CG.lookupRefSCC(CN));
+  EXPECT_EQ(&RC1, CG.lookupRefSCC(BN));
+  EXPECT_EQ(&RC2, CG.lookupRefSCC(AN));
+
+  // Now delete 'dead'. There are no uses of this function but there are
+  // spurious references.
+  CG.removeDeadFunction(DeadN.getFunction());
+
+  // The only observable change should be that the RefSCC is gone from the
+  // postorder sequence.
+  I = CG.postorder_ref_scc_begin();
+  EXPECT_EQ(&RC1, &*I++);
+  EXPECT_EQ(&RC2, &*I++);
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+}
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/LoopInfoTest.cpp b/src/llvm-project/llvm/unittests/Analysis/LoopInfoTest.cpp
new file mode 100644
index 0000000..9a59fc8
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/LoopInfoTest.cpp
@@ -0,0 +1,213 @@
+//===- LoopInfoTest.cpp - LoopInfo unit tests -----------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+/// Build the loop info for the function and run the Test.
+static void
+runWithLoopInfo(Module &M, StringRef FuncName,
+                function_ref<void(Function &F, LoopInfo &LI)> Test) {
+  auto *F = M.getFunction(FuncName);
+  ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
+  // Compute the dominator tree and the loop info for the function.
+  DominatorTree DT(*F);
+  LoopInfo LI(DT);
+  Test(*F, LI);
+}
+
+static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
+                                              const char *ModuleStr) {
+  SMDiagnostic Err;
+  return parseAssemblyString(ModuleStr, Err, Context);
+}
+
+// This tests that, for a loop with a single latch, we get the loop id from
+// its only latch, even when the loop is not in simplified form.
+TEST(LoopInfoTest, LoopWithSingleLatch) {
+  const char *ModuleStr =
+      "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+      "define void @foo(i32 %n) {\n"
+      "entry:\n"
+      "  br i1 undef, label %for.cond, label %for.end\n"
+      "for.cond:\n"
+      "  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]\n"
+      "  %cmp = icmp slt i32 %i.0, %n\n"
+      "  br i1 %cmp, label %for.inc, label %for.end\n"
+      "for.inc:\n"
+      "  %inc = add nsw i32 %i.0, 1\n"
+      "  br label %for.cond, !llvm.loop !0\n"
+      "for.end:\n"
+      "  ret void\n"
+      "}\n"
+      "!0 = distinct !{!0, !1}\n"
+      "!1 = !{!\"llvm.loop.distribute.enable\", i1 true}\n";
+
+  // Parse the module.
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+
+  runWithLoopInfo(*M, "foo", [&](Function &F, LoopInfo &LI) {
+    Function::iterator FI = F.begin();
+    // First basic block is entry - skip it.
+    BasicBlock *Header = &*(++FI);
+    assert(Header->getName() == "for.cond");
+    Loop *L = LI.getLoopFor(Header);
+
+    // This loop is not in simplified form.
+    EXPECT_FALSE(L->isLoopSimplifyForm());
+
+    // Analyze the loop metadata id.
+    bool loopIDFoundAndSet = false;
+    // Try to get and set the metadata id for the loop.
+    if (MDNode *D = L->getLoopID()) {
+      L->setLoopID(D);
+      loopIDFoundAndSet = true;
+    }
+
+    // We must have successfully found and set the loop id in the
+    // only latch the loop has.
+    EXPECT_TRUE(loopIDFoundAndSet);
+  });
+}
+
+// Test loop id handling for a loop with multiple latches.
+TEST(LoopInfoTest, LoopWithMultipleLatches) {
+  const char *ModuleStr =
+      "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+      "define void @foo(i32 %n) {\n"
+      "entry:\n"
+      "  br i1 undef, label %for.cond, label %for.end\n"
+      "for.cond:\n"
+      "  %i.0 = phi i32 [ 0, %entry ], [ %inc, %latch.1 ], [ %inc, %latch.2 ]\n"
+      "  %inc = add nsw i32 %i.0, 1\n"
+      "  %cmp = icmp slt i32 %i.0, %n\n"
+      "  br i1 %cmp, label %latch.1, label %for.end\n"
+      "latch.1:\n"
+      "  br i1 undef, label %for.cond, label %latch.2, !llvm.loop !0\n"
+      "latch.2:\n"
+      "  br label %for.cond, !llvm.loop !0\n"
+      "for.end:\n"
+      "  ret void\n"
+      "}\n"
+      "!0 = distinct !{!0, !1}\n"
+      "!1 = !{!\"llvm.loop.distribute.enable\", i1 true}\n";
+
+  // Parse the module.
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+
+  runWithLoopInfo(*M, "foo", [&](Function &F, LoopInfo &LI) {
+    Function::iterator FI = F.begin();
+    // First basic block is entry - skip it.
+    BasicBlock *Header = &*(++FI);
+    assert(Header->getName() == "for.cond");
+    Loop *L = LI.getLoopFor(Header);
+    EXPECT_NE(L, nullptr);
+
+    // This loop is not in simplified form.
+    EXPECT_FALSE(L->isLoopSimplifyForm());
+
+    // Try to get and set the metadata id for the loop.
+    MDNode *OldLoopID = L->getLoopID();
+    EXPECT_NE(OldLoopID, nullptr);
+
+    MDNode *NewLoopID = MDNode::get(Context, {nullptr});
+    // Set operand 0 to refer to the loop id itself.
+    NewLoopID->replaceOperandWith(0, NewLoopID);
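+    // By convention, loop id metadata refers to itself through operand 0, as
+    // in the '!0 = distinct !{!0, !1}' node used in the module string above.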
+
+    L->setLoopID(NewLoopID);
+    EXPECT_EQ(L->getLoopID(), NewLoopID);
+    EXPECT_NE(L->getLoopID(), OldLoopID);
+
+    L->setLoopID(OldLoopID);
+    EXPECT_EQ(L->getLoopID(), OldLoopID);
+    EXPECT_NE(L->getLoopID(), NewLoopID);
+  });
+}
+
+TEST(LoopInfoTest, PreorderTraversals) {
+  const char *ModuleStr = "define void @f() {\n"
+                          "entry:\n"
+                          "  br label %loop.0\n"
+                          "loop.0:\n"
+                          "  br i1 undef, label %loop.0.0, label %loop.1\n"
+                          "loop.0.0:\n"
+                          "  br i1 undef, label %loop.0.0, label %loop.0.1\n"
+                          "loop.0.1:\n"
+                          "  br i1 undef, label %loop.0.1, label %loop.0.2\n"
+                          "loop.0.2:\n"
+                          "  br i1 undef, label %loop.0.2, label %loop.0\n"
+                          "loop.1:\n"
+                          "  br i1 undef, label %loop.1.0, label %end\n"
+                          "loop.1.0:\n"
+                          "  br i1 undef, label %loop.1.0, label %loop.1.1\n"
+                          "loop.1.1:\n"
+                          "  br i1 undef, label %loop.1.1, label %loop.1.2\n"
+                          "loop.1.2:\n"
+                          "  br i1 undef, label %loop.1.2, label %loop.1\n"
+                          "end:\n"
+                          "  ret void\n"
+                          "}\n";
+  // Parse the module.
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+  Function &F = *M->begin();
+
+  DominatorTree DT(F);
+  LoopInfo LI;
+  LI.analyze(DT);
+
+  Function::iterator I = F.begin();
+  ASSERT_EQ("entry", I->getName());
+  ++I;
+  Loop &L_0 = *LI.getLoopFor(&*I++);
+  ASSERT_EQ("loop.0", L_0.getHeader()->getName());
+  Loop &L_0_0 = *LI.getLoopFor(&*I++);
+  ASSERT_EQ("loop.0.0", L_0_0.getHeader()->getName());
+  Loop &L_0_1 = *LI.getLoopFor(&*I++);
+  ASSERT_EQ("loop.0.1", L_0_1.getHeader()->getName());
+  Loop &L_0_2 = *LI.getLoopFor(&*I++);
+  ASSERT_EQ("loop.0.2", L_0_2.getHeader()->getName());
+  Loop &L_1 = *LI.getLoopFor(&*I++);
+  ASSERT_EQ("loop.1", L_1.getHeader()->getName());
+  Loop &L_1_0 = *LI.getLoopFor(&*I++);
+  ASSERT_EQ("loop.1.0", L_1_0.getHeader()->getName());
+  Loop &L_1_1 = *LI.getLoopFor(&*I++);
+  ASSERT_EQ("loop.1.1", L_1_1.getHeader()->getName());
+  Loop &L_1_2 = *LI.getLoopFor(&*I++);
+  ASSERT_EQ("loop.1.2", L_1_2.getHeader()->getName());
+
+  auto Preorder = LI.getLoopsInPreorder();
+  ASSERT_EQ(8u, Preorder.size());
+  EXPECT_EQ(&L_0, Preorder[0]);
+  EXPECT_EQ(&L_0_0, Preorder[1]);
+  EXPECT_EQ(&L_0_1, Preorder[2]);
+  EXPECT_EQ(&L_0_2, Preorder[3]);
+  EXPECT_EQ(&L_1, Preorder[4]);
+  EXPECT_EQ(&L_1_0, Preorder[5]);
+  EXPECT_EQ(&L_1_1, Preorder[6]);
+  EXPECT_EQ(&L_1_2, Preorder[7]);
+
+  auto ReverseSiblingPreorder = LI.getLoopsInReverseSiblingPreorder();
+  ASSERT_EQ(8u, ReverseSiblingPreorder.size());
+  EXPECT_EQ(&L_1, ReverseSiblingPreorder[0]);
+  EXPECT_EQ(&L_1_2, ReverseSiblingPreorder[1]);
+  EXPECT_EQ(&L_1_1, ReverseSiblingPreorder[2]);
+  EXPECT_EQ(&L_1_0, ReverseSiblingPreorder[3]);
+  EXPECT_EQ(&L_0, ReverseSiblingPreorder[4]);
+  EXPECT_EQ(&L_0_2, ReverseSiblingPreorder[5]);
+  EXPECT_EQ(&L_0_1, ReverseSiblingPreorder[6]);
+  EXPECT_EQ(&L_0_0, ReverseSiblingPreorder[7]);
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/MemoryBuiltinsTest.cpp b/src/llvm-project/llvm/unittests/Analysis/MemoryBuiltinsTest.cpp
new file mode 100644
index 0000000..898ebee
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/MemoryBuiltinsTest.cpp
@@ -0,0 +1,50 @@
+//===- MemoryBuiltinsTest.cpp - Tests for utilities in MemoryBuiltins.h ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+// allocsize should not imply that a function is a traditional allocation
+// function (e.g. one whose calls can be optimized out); it just tells us how
+// many bytes exist at the pointer handed back by the function.
+TEST(AllocSize, AllocationBuiltinsTest) {
+  LLVMContext Context;
+  Module M("", Context);
+  IntegerType *ArgTy = Type::getInt32Ty(Context);
+
+  Function *AllocSizeFn = Function::Create(
+      FunctionType::get(Type::getInt8PtrTy(Context), {ArgTy}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+
+  AllocSizeFn->addFnAttr(Attribute::getWithAllocSizeArgs(Context, 1, None));
+
+  // 100 is arbitrary.
+  std::unique_ptr<CallInst> Caller(
+      CallInst::Create(AllocSizeFn, {ConstantInt::get(ArgTy, 100)}));
+
+  const TargetLibraryInfo *TLI = nullptr;
+  EXPECT_FALSE(isNoAliasFn(Caller.get(), TLI));
+  EXPECT_FALSE(isMallocLikeFn(Caller.get(), TLI));
+  EXPECT_FALSE(isCallocLikeFn(Caller.get(), TLI));
+  EXPECT_FALSE(isAllocLikeFn(Caller.get(), TLI));
+
+  // FIXME: We might be able to treat allocsize functions as general allocation
+  // functions. For the moment, being conservative seems better (and we'd have
+  // to plumb stuff around `isNoAliasFn`).
+  EXPECT_FALSE(isAllocationFn(Caller.get(), TLI));
+}
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/MemorySSATest.cpp b/src/llvm-project/llvm/unittests/Analysis/MemorySSATest.cpp
new file mode 100644
index 0000000..505e185
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -0,0 +1,1586 @@
+//===- MemorySSATest.cpp - Unit tests for MemorySSA -----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/MemorySSAUpdater.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+const static char DLString[] = "e-i64:64-f80:128-n8:16:32:64-S128";
+
+/// There's a lot of common setup between these tests. This fixture helps reduce
+/// that. Tests should mock up a function, store it in F, and then call
+/// setupAnalyses().
+class MemorySSATest : public testing::Test {
+protected:
+  // N.B. Many of these members depend on each other (e.g. the Module depends on
+  // the Context, etc.). So, order matters here (and in TestAnalyses).
+  LLVMContext C;
+  Module M;
+  IRBuilder<> B;
+  DataLayout DL;
+  TargetLibraryInfoImpl TLII;
+  TargetLibraryInfo TLI;
+  Function *F;
+
+  // Things that we need to build after the function is created.
+  struct TestAnalyses {
+    DominatorTree DT;
+    AssumptionCache AC;
+    AAResults AA;
+    BasicAAResult BAA;
+    // We need to defer MSSA construction until AA is *entirely* set up, which
+    // requires calling addAAResult. Hence, we just use a pointer here.
+    std::unique_ptr<MemorySSA> MSSA;
+    MemorySSAWalker *Walker;
+
+    TestAnalyses(MemorySSATest &Test)
+        : DT(*Test.F), AC(*Test.F), AA(Test.TLI),
+          BAA(Test.DL, *Test.F, Test.TLI, AC, &DT) {
+      AA.addAAResult(BAA);
+      MSSA = make_unique<MemorySSA>(*Test.F, &AA, &DT);
+      Walker = MSSA->getWalker();
+    }
+  };
+
+  std::unique_ptr<TestAnalyses> Analyses;
+
+  void setupAnalyses() {
+    assert(F);
+    Analyses.reset(new TestAnalyses(*this));
+  }
+
+public:
+  MemorySSATest()
+      : M("MemorySSATest", C), B(C), DL(DLString), TLI(TLII), F(nullptr) {}
+};
+
+TEST_F(MemorySSATest, CreateALoad) {
+  // We create a diamond with a store on one side. After building MemorySSA,
+  // we create a load after the merge point and use it to test updating by
+  // creating an access for the load.
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left);
+  Argument *PointerArg = &*F->arg_begin();
+  B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(Merge, Left);
+  BranchInst::Create(Merge, Right);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+  // Add the load
+  B.SetInsertPoint(Merge);
+  LoadInst *LoadInst = B.CreateLoad(PointerArg);
+
+  // MemoryPHI should already exist.
+  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
+  EXPECT_NE(MP, nullptr);
+
+  // Create the load memory access
+  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+      LoadInst, MP, Merge, MemorySSA::Beginning));
+  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
+  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
+  MSSA.verifyMemorySSA();
+}
+TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
+  // We create a diamond, then build MemorySSA with no memory accesses, and
+  // incrementally update it by inserting a store in the entry, a load in the
+  // merge point, then a store in the branch, another load in the merge point,
+  // and then a store in the entry.
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left, Left->begin());
+  Argument *PointerArg = &*F->arg_begin();
+  B.SetInsertPoint(Left);
+  B.CreateBr(Merge);
+  B.SetInsertPoint(Right);
+  B.CreateBr(Merge);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+  // Add the store
+  B.SetInsertPoint(Entry, Entry->begin());
+  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+  MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
+      EntryStore, nullptr, Entry, MemorySSA::Beginning);
+  Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
+
+  // Add the load
+  B.SetInsertPoint(Merge, Merge->begin());
+  LoadInst *FirstLoad = B.CreateLoad(PointerArg);
+
+  // MemoryPHI should not already exist.
+  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
+  EXPECT_EQ(MP, nullptr);
+
+  // Create the load memory access
+  MemoryUse *FirstLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+      FirstLoad, nullptr, Merge, MemorySSA::Beginning));
+  Updater.insertUse(FirstLoadAccess);
+  // Should just have a load using the entry access, because it should discover
+  // the phi is trivial
+  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), EntryStoreAccess);
+
+  // Create a store on the left
+  // Add the store
+  B.SetInsertPoint(Left, Left->begin());
+  StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
+  MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
+      LeftStore, nullptr, Left, MemorySSA::Beginning);
+  Updater.insertDef(cast<MemoryDef>(LeftStoreAccess), false);
+  // We don't touch existing loads, so we need to create a new one to get a phi
+  // Add the second load
+  B.SetInsertPoint(Merge, Merge->begin());
+  LoadInst *SecondLoad = B.CreateLoad(PointerArg);
+
+  // MemoryPHI should not already exist.
+  MP = MSSA.getMemoryAccess(Merge);
+  EXPECT_EQ(MP, nullptr);
+
+  // Create the load memory access
+  MemoryUse *SecondLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+      SecondLoad, nullptr, Merge, MemorySSA::Beginning));
+  Updater.insertUse(SecondLoadAccess);
+  // Now the load should be a phi of the entry store and the left store
+  MemoryPhi *MergePhi =
+      dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
+  EXPECT_NE(MergePhi, nullptr);
+  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
+  // Now create a store below the existing one in the entry
+  B.SetInsertPoint(Entry, --Entry->end());
+  StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+  MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
+      SecondEntryStore, nullptr, Entry, MemorySSA::End);
+  // Insert it twice just to test renaming
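+  // The second argument to insertDef is RenameUses: without renaming, the
+  // first load keeps its old defining access; with renaming, it is updated to
+  // the phi.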
+  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), false);
+  EXPECT_NE(FirstLoadAccess->getDefiningAccess(), MergePhi);
+  Updater.insertDef(cast<MemoryDef>(SecondEntryStoreAccess), true);
+  EXPECT_EQ(FirstLoadAccess->getDefiningAccess(), MergePhi);
+  // and make sure the phi below it got updated, despite being blocks away
+  MergePhi = dyn_cast<MemoryPhi>(SecondLoadAccess->getDefiningAccess());
+  EXPECT_NE(MergePhi, nullptr);
+  EXPECT_EQ(MergePhi->getIncomingValue(0), SecondEntryStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, CreateALoadUpdater) {
+  // We create a diamond, then build MemorySSA with no memory accesses, and
+  // incrementally update it by inserting a store in one of the branches, and a
+  // load in the merge point
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left, Left->begin());
+  Argument *PointerArg = &*F->arg_begin();
+  B.SetInsertPoint(Left);
+  B.CreateBr(Merge);
+  B.SetInsertPoint(Right);
+  B.CreateBr(Merge);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+  B.SetInsertPoint(Left, Left->begin());
+  // Add the store
+  StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
+  MemoryAccess *StoreAccess =
+      Updater.createMemoryAccessInBB(SI, nullptr, Left, MemorySSA::Beginning);
+  Updater.insertDef(cast<MemoryDef>(StoreAccess));
+
+  // Add the load
+  B.SetInsertPoint(Merge, Merge->begin());
+  LoadInst *LoadInst = B.CreateLoad(PointerArg);
+
+  // MemoryPHI should not already exist.
+  MemoryPhi *MP = MSSA.getMemoryAccess(Merge);
+  EXPECT_EQ(MP, nullptr);
+
+  // Create the load memory access
+  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+      LoadInst, nullptr, Merge, MemorySSA::Beginning));
+  Updater.insertUse(LoadAccess);
+  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
+  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, SinkLoad) {
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left, Left->begin());
+  Argument *PointerArg = &*F->arg_begin();
+  B.SetInsertPoint(Left);
+  B.CreateBr(Merge);
+  B.SetInsertPoint(Right);
+  B.CreateBr(Merge);
+
+  // Load in left block
+  B.SetInsertPoint(Left, Left->begin());
+  LoadInst *LoadInst1 = B.CreateLoad(PointerArg);
+  // Store in merge block
+  B.SetInsertPoint(Merge, Merge->begin());
+  B.CreateStore(B.getInt8(16), PointerArg);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+
+  // Mimic sinking of a load:
+  // - clone load
+  // - insert in "exit" block
+  // - insert in mssa
+  // - remove from original block
+
+  LoadInst *LoadInstClone = cast<LoadInst>(LoadInst1->clone());
+  Merge->getInstList().insert(Merge->begin(), LoadInstClone);
+  MemoryAccess * NewLoadAccess =
+      Updater.createMemoryAccessInBB(LoadInstClone, nullptr,
+                                     LoadInstClone->getParent(),
+                                     MemorySSA::Beginning);
+  Updater.insertUse(cast<MemoryUse>(NewLoadAccess));
+  MSSA.verifyMemorySSA();
+  Updater.removeMemoryAccess(MSSA.getMemoryAccess(LoadInst1));
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, MoveAStore) {
+  // We create a diamond where there is a store in the entry, a store on one
+  // side, and a load at the end.  After building MemorySSA, we test updating
+  // by moving the store from the side block to the entry block. This destroys
+  // the old access.
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  Argument *PointerArg = &*F->arg_begin();
+  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left);
+  StoreInst *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(Merge, Left);
+  BranchInst::Create(Merge, Right);
+  B.SetInsertPoint(Merge);
+  B.CreateLoad(PointerArg);
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+  // Move the store
+  SideStore->moveBefore(Entry->getTerminator());
+  MemoryAccess *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
+  MemoryAccess *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
+  MemoryAccess *NewStoreAccess = Updater.createMemoryAccessAfter(
+      SideStore, EntryStoreAccess, EntryStoreAccess);
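+  // createMemoryAccessAfter takes the instruction, its defining access, and
+  // the access to insert after; here both roles are played by the entry
+  // store's access.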
+  EntryStoreAccess->replaceAllUsesWith(NewStoreAccess);
+  Updater.removeMemoryAccess(SideStoreAccess);
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, MoveAStoreUpdater) {
+  // We create a diamond where there is a store in the entry, a store on one
+  // side, and a load at the end.  After building MemorySSA, we test updating
+  // by moving the store from the side block to the entry block.  This destroys
+  // the old access.
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  Argument *PointerArg = &*F->arg_begin();
+  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left);
+  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(Merge, Left);
+  BranchInst::Create(Merge, Right);
+  B.SetInsertPoint(Merge);
+  auto *MergeLoad = B.CreateLoad(PointerArg);
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+
+  // Move the store
+  SideStore->moveBefore(Entry->getTerminator());
+  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
+  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
+  auto *NewStoreAccess = Updater.createMemoryAccessAfter(
+      SideStore, EntryStoreAccess, EntryStoreAccess);
+  // Before, the load will point to a phi of the EntryStore and SideStore.
+  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
+  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
+  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
+  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
+  Updater.removeMemoryAccess(SideStoreAccess);
+  Updater.insertDef(cast<MemoryDef>(NewStoreAccess));
+  // After, it's a phi of the new side store access.
+  EXPECT_EQ(MergePhi->getIncomingValue(0), NewStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(1), NewStoreAccess);
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, MoveAStoreUpdaterMove) {
+  // We create a diamond where there is a store in the entry, a store on one
+  // side, and a load at the end.  After building MemorySSA, we test updating
+  // by moving the store from the side block to the entry block.  This does not
+  // destroy the old access.
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  Argument *PointerArg = &*F->arg_begin();
+  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left);
+  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(Merge, Left);
+  BranchInst::Create(Merge, Right);
+  B.SetInsertPoint(Merge);
+  auto *MergeLoad = B.CreateLoad(PointerArg);
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+
+  // Move the store
+  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
+  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
+  // Before, the load will point to a phi of the EntryStore and SideStore.
+  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
+  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
+  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
+  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
+  SideStore->moveBefore(*EntryStore->getParent(), ++EntryStore->getIterator());
+  Updater.moveAfter(SideStoreAccess, EntryStoreAccess);
+  // After, it's a phi of the side store.
+  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
+
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, MoveAStoreAllAround) {
+  // We create a diamond where there is a store in the entry, a store on one
+  // side, and a load at the end.  After building MemorySSA, we test updating
+  // by moving the store from the side block to the entry block, then to the
+  // other side block, then to before the load.  This does not destroy the old
+  // access.
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  Argument *PointerArg = &*F->arg_begin();
+  StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left);
+  auto *SideStore = B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(Merge, Left);
+  BranchInst::Create(Merge, Right);
+  B.SetInsertPoint(Merge);
+  auto *MergeLoad = B.CreateLoad(PointerArg);
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+
+  // Move the store
+  auto *EntryStoreAccess = MSSA.getMemoryAccess(EntryStore);
+  auto *SideStoreAccess = MSSA.getMemoryAccess(SideStore);
+  // Before, the load will point to a phi of the EntryStore and SideStore.
+  auto *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(MergeLoad));
+  EXPECT_TRUE(isa<MemoryPhi>(LoadAccess->getDefiningAccess()));
+  MemoryPhi *MergePhi = cast<MemoryPhi>(LoadAccess->getDefiningAccess());
+  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(0), SideStoreAccess);
+  // Move the store before the entry store
+  SideStore->moveBefore(*EntryStore->getParent(), EntryStore->getIterator());
+  Updater.moveBefore(SideStoreAccess, EntryStoreAccess);
+  // After, it's a phi of the entry store.
+  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+  MSSA.verifyMemorySSA();
+  // Now move the store to the right branch
+  SideStore->moveBefore(*Right, Right->begin());
+  Updater.moveToPlace(SideStoreAccess, Right, MemorySSA::Beginning);
+  MSSA.verifyMemorySSA();
+  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(1), SideStoreAccess);
+  // Now move it before the load
+  SideStore->moveBefore(MergeLoad);
+  Updater.moveBefore(SideStoreAccess, LoadAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
+  EXPECT_EQ(MergePhi->getIncomingValue(1), EntryStoreAccess);
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, RemoveAPhi) {
+  // We create a diamond where there is a store on one side, and then a load
+  // after the merge point.  This enables us to test a bunch of different
+  // removal cases.
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left);
+  Argument *PointerArg = &*F->arg_begin();
+  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(Merge, Left);
+  BranchInst::Create(Merge, Right);
+  B.SetInsertPoint(Merge);
+  LoadInst *LoadInst = B.CreateLoad(PointerArg);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+
+  // Before, the load will be a use of a phi<store, liveonentry>.
+  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
+  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
+  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
+  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
+  // Kill the store
+  Updater.removeMemoryAccess(StoreAccess);
+  MemoryPhi *MP = cast<MemoryPhi>(DefiningAccess);
+  // Verify the phi ended up as liveonentry, liveonentry
+  for (auto &Op : MP->incoming_values())
+    EXPECT_TRUE(MSSA.isLiveOnEntryDef(cast<MemoryAccess>(Op.get())));
+  // Replace the phi uses with the live on entry def
+  MP->replaceAllUsesWith(MSSA.getLiveOnEntryDef());
+  // Verify the load is now defined by liveOnEntryDef
+  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
+  // Remove the PHI
+  Updater.removeMemoryAccess(MP);
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, RemoveMemoryAccess) {
+  // We create a diamond where there is a store on one side, and then a load
+  // after the merge point.  This enables us to test a bunch of different
+  // removal cases.
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  BasicBlock *Entry(BasicBlock::Create(C, "", F));
+  BasicBlock *Left(BasicBlock::Create(C, "", F));
+  BasicBlock *Right(BasicBlock::Create(C, "", F));
+  BasicBlock *Merge(BasicBlock::Create(C, "", F));
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), Left, Right);
+  B.SetInsertPoint(Left);
+  Argument *PointerArg = &*F->arg_begin();
+  StoreInst *StoreInst = B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(Merge, Left);
+  BranchInst::Create(Merge, Right);
+  B.SetInsertPoint(Merge);
+  LoadInst *LoadInst = B.CreateLoad(PointerArg);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+  MemorySSAUpdater Updater(&MSSA);
+
+  // Before, the load will be a use of a phi<store, liveonentry>. It should be
+  // the same after.
+  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LoadInst));
+  MemoryDef *StoreAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreInst));
+  MemoryAccess *DefiningAccess = LoadAccess->getDefiningAccess();
+  EXPECT_TRUE(isa<MemoryPhi>(DefiningAccess));
+  // The load is currently clobbered by one of the phi arguments, so the walker
+  // should determine the clobbering access as the phi.
+  EXPECT_EQ(DefiningAccess, Walker->getClobberingMemoryAccess(LoadInst));
+  Updater.removeMemoryAccess(StoreAccess);
+  MSSA.verifyMemorySSA();
+  // After the removal, let's see if we got the right accesses.
+  // The load should still point to the phi ...
+  EXPECT_EQ(DefiningAccess, LoadAccess->getDefiningAccess());
+  // but we should now get live on entry for the clobbering definition of the
+  // load, since the walker will walk past the phi node because every argument
+  // is the same.
+  // XXX: This currently requires either removing the phi or resetting optimized
+  // on the load
+
+  EXPECT_FALSE(
+      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
+  // If we reset optimized, we get live on entry.
+  LoadAccess->resetOptimized();
+  EXPECT_TRUE(
+      MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(LoadInst)));
+  // The phi should now be a two entry phi with two live on entry defs.
+  for (const auto &Op : DefiningAccess->operands()) {
+    MemoryAccess *Operand = cast<MemoryAccess>(&*Op);
+    EXPECT_TRUE(MSSA.isLiveOnEntryDef(Operand));
+  }
+
+  // Now we try to remove the single valued phi
+  Updater.removeMemoryAccess(DefiningAccess);
+  MSSA.verifyMemorySSA();
+  // Now the load should be a load of live on entry.
+  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LoadAccess->getDefiningAccess()));
+}
+
+// We had a bug with caching where the walker would report MemoryDef#3's clobber
+// (below) was MemoryDef#1.
+//
+// define void @F(i8*) {
+//   %A = alloca i8, i8 1
+// ; 1 = MemoryDef(liveOnEntry)
+//   store i8 0, i8* %A
+// ; 2 = MemoryDef(1)
+//   store i8 1, i8* %A
+// ; 3 = MemoryDef(2)
+//   store i8 2, i8* %A
+// }
+TEST_F(MemorySSATest, TestTripleStore) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  StoreInst *S1 = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+  StoreInst *S2 = B.CreateStore(ConstantInt::get(Int8, 1), Alloca);
+  StoreInst *S3 = B.CreateStore(ConstantInt::get(Int8, 2), Alloca);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  unsigned I = 0;
+  for (StoreInst *V : {S1, S2, S3}) {
+    // Everything should be clobbered by its defining access
+    MemoryAccess *DefiningAccess = MSSA.getMemoryAccess(V)->getDefiningAccess();
+    MemoryAccess *WalkerClobber = Walker->getClobberingMemoryAccess(V);
+    EXPECT_EQ(DefiningAccess, WalkerClobber)
+        << "Store " << I << " doesn't have the correct clobbering access";
+    // Keep the increment out here: anything streamed into the EXPECT_EQ
+    // message above is only evaluated when the expectation fails.
+    ++I;
+  }
+}
+
+// ...And fixing the above bug made it obvious that, when walking, MemorySSA's
+// walker was caching the initial node it walked. This was fine (albeit
+// mostly redundant) unless the initial node being walked is a clobber for the
+// query. In that case, we'd cache that the node clobbered itself.
+TEST_F(MemorySSATest, TestStoreAndLoad) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  Instruction *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+  Instruction *LI = B.CreateLoad(Alloca);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LI);
+  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SI));
+  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SI)));
+}
+
+// Another bug (related to the above two fixes): It was noted that, given the
+// following code:
+// ; 1 = MemoryDef(liveOnEntry)
+// store i8 0, i8* %1
+//
+// ...A query to getClobberingMemoryAccess(MemoryAccess*, MemoryLocation) would
+// hand back the store (correctly). A later call to
+// getClobberingMemoryAccess(const Instruction*) would also hand back the store
+// (incorrectly; it should return liveOnEntry).
+//
+// This test checks that repeated calls to either function return what they're
+// meant to.
+TEST_F(MemorySSATest, TestStoreDoubleQuery) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  StoreInst *SI = B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  MemoryAccess *StoreAccess = MSSA.getMemoryAccess(SI);
+  MemoryLocation StoreLoc = MemoryLocation::get(SI);
+  MemoryAccess *Clobber =
+      Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
+  MemoryAccess *LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
+
+  EXPECT_EQ(Clobber, StoreAccess);
+  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
+
+  // Try again (with entries in the cache already) for good measure...
+  Clobber = Walker->getClobberingMemoryAccess(StoreAccess, StoreLoc);
+  LiveOnEntry = Walker->getClobberingMemoryAccess(SI);
+  EXPECT_EQ(Clobber, StoreAccess);
+  EXPECT_TRUE(MSSA.isLiveOnEntryDef(LiveOnEntry));
+}
+
+// Bug: During phi optimization, the walker wouldn't cache to the proper result
+// in the farthest-walked BB.
+//
+// Specifically, it would assume that whatever we walked to was a clobber.
+// "Whatever we walked to" isn't a clobber if we hit a cache entry.
+//
+// ...So, we need a test case that looks like:
+//    A
+//   / \
+//  B   |
+//   \ /
+//    C
+//
+// Where, when we try to optimize a thing in 'C', a blocker is found in 'B'.
+// The walk must determine that the blocker exists by using cache entries *while
+// walking* 'B'.
+TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "A", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Constant *One = ConstantInt::get(Int8, 1);
+  Constant *Zero = ConstantInt::get(Int8, 0);
+  Value *AllocA = B.CreateAlloca(Int8, One, "a");
+  Value *AllocB = B.CreateAlloca(Int8, One, "b");
+  BasicBlock *IfThen = BasicBlock::Create(C, "B", F);
+  BasicBlock *IfEnd = BasicBlock::Create(C, "C", F);
+
+  B.CreateCondBr(UndefValue::get(Type::getInt1Ty(C)), IfThen, IfEnd);
+
+  B.SetInsertPoint(IfThen);
+  Instruction *FirstStore = B.CreateStore(Zero, AllocA);
+  B.CreateStore(Zero, AllocB);
+  Instruction *ALoad0 = B.CreateLoad(AllocA, "");
+  Instruction *BStore = B.CreateStore(Zero, AllocB);
+  // Due to use optimization/etc. we make a store to A, which is removed after
+  // we build MSSA. This helps keep the test case simple-ish.
+  Instruction *KillStore = B.CreateStore(Zero, AllocA);
+  Instruction *ALoad = B.CreateLoad(AllocA, "");
+  B.CreateBr(IfEnd);
+
+  B.SetInsertPoint(IfEnd);
+  Instruction *BelowPhi = B.CreateStore(Zero, AllocA);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+  MemorySSAUpdater Updater(&MSSA);
+
+  // Kill `KillStore`; it exists solely so that the load after it won't be
+  // optimized to FirstStore.
+  Updater.removeMemoryAccess(MSSA.getMemoryAccess(KillStore));
+  KillStore->eraseFromParent();
+  auto *ALoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(ALoad));
+  EXPECT_EQ(ALoadMA->getDefiningAccess(), MSSA.getMemoryAccess(BStore));
+
+  // Populate the cache for the store to AllocB directly after FirstStore. It
+  // should point to something in block B (so something in C can't be optimized
+  // to it).
+  MemoryAccess *Load0Clobber = Walker->getClobberingMemoryAccess(ALoad0);
+  EXPECT_EQ(MSSA.getMemoryAccess(FirstStore), Load0Clobber);
+
+  // If the bug exists, this will introduce a bad cache entry for %a on BStore.
+  // It will point to the store to %b after FirstStore. This only happens during
+  // phi optimization.
+  MemoryAccess *BottomClobber = Walker->getClobberingMemoryAccess(BelowPhi);
+  MemoryAccess *Phi = MSSA.getMemoryAccess(IfEnd);
+  EXPECT_EQ(BottomClobber, Phi);
+
+  // This query will first check the cache for {%a, BStore}. It should point to
+  // FirstStore, not to the store after FirstStore.
+  MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
+  EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
+}
+
+// Test that our walker properly handles loads with invariant.load metadata.
+// It's a bit hacky, since we add the metadata *after* building MSSA.
+// Otherwise, the use optimizer will optimize it for us, which isn't what we
+// want.
+// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
+TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Constant *One = ConstantInt::get(Int8, 1);
+  Value *AllocA = B.CreateAlloca(Int8, One, "");
+
+  Instruction *Store = B.CreateStore(One, AllocA);
+  Instruction *Load = B.CreateLoad(AllocA);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
+  auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
+  EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
+
+  // ...At the time of writing, no cache should exist for LoadMA. Be a bit
+  // flexible to future changes.
+  Walker->invalidateInfo(LoadMA);
+  Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));
+
+  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
+  EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
+}
+
+// Test loads get reoptimized properly by the walker.
+TEST_F(MemorySSATest, WalkerReopt) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  Instruction *SIA = B.CreateStore(ConstantInt::get(Int8, 0), AllocaA);
+  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
+  Instruction *SIB = B.CreateStore(ConstantInt::get(Int8, 0), AllocaB);
+  Instruction *LIA = B.CreateLoad(AllocaA);
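+  // The load of A is clobbered by SIA; the store to the unrelated alloca B
+  // gives us a deliberately non-clobbering access to hang the new,
+  // unoptimized MemoryUse off of below.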
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+  MemorySSAUpdater Updater(&MSSA);
+
+  MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LIA);
+  MemoryUse *LoadAccess = cast<MemoryUse>(MSSA.getMemoryAccess(LIA));
+  EXPECT_EQ(LoadClobber, MSSA.getMemoryAccess(SIA));
+  EXPECT_TRUE(MSSA.isLiveOnEntryDef(Walker->getClobberingMemoryAccess(SIA)));
+  Updater.removeMemoryAccess(LoadAccess);
+
+  // Create the load memory access pointing to an unoptimized place.
+  MemoryUse *NewLoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+      LIA, MSSA.getMemoryAccess(SIB), LIA->getParent(), MemorySSA::End));
+  // This should cause it to be optimized
+  EXPECT_EQ(Walker->getClobberingMemoryAccess(NewLoadAccess), LoadClobber);
+  EXPECT_EQ(NewLoadAccess->getDefiningAccess(), LoadClobber);
+}
+
+// Test out MemorySSAUpdater::moveBefore
+TEST_F(MemorySSATest, MoveAboveMemoryDef) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *A = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  Value *B_ = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
+  Value *C = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
+
+  StoreInst *StoreA0 = B.CreateStore(ConstantInt::get(Int8, 0), A);
+  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 0), B_);
+  LoadInst *LoadB = B.CreateLoad(B_);
+  StoreInst *StoreA1 = B.CreateStore(ConstantInt::get(Int8, 4), A);
+  StoreInst *StoreC = B.CreateStore(ConstantInt::get(Int8, 4), C);
+  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 4), A);
+  LoadInst *LoadC = B.CreateLoad(C);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker &Walker = *Analyses->Walker;
+
+  MemorySSAUpdater Updater(&MSSA);
+  StoreC->moveBefore(StoreB);
+  Updater.moveBefore(cast<MemoryDef>(MSSA.getMemoryAccess(StoreC)),
+                     cast<MemoryDef>(MSSA.getMemoryAccess(StoreB)));
+
+  MSSA.verifyMemorySSA();
+
+  EXPECT_EQ(MSSA.getMemoryAccess(StoreB)->getDefiningAccess(),
+            MSSA.getMemoryAccess(StoreC));
+  EXPECT_EQ(MSSA.getMemoryAccess(StoreC)->getDefiningAccess(),
+            MSSA.getMemoryAccess(StoreA0));
+  EXPECT_EQ(MSSA.getMemoryAccess(StoreA2)->getDefiningAccess(),
+            MSSA.getMemoryAccess(StoreA1));
+  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadB),
+            MSSA.getMemoryAccess(StoreB));
+  EXPECT_EQ(Walker.getClobberingMemoryAccess(LoadC),
+            MSSA.getMemoryAccess(StoreC));
+
+  // exercise block numbering
+  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreC),
+                                    MSSA.getMemoryAccess(StoreB)));
+  EXPECT_TRUE(MSSA.locallyDominates(MSSA.getMemoryAccess(StoreA1),
+                                    MSSA.getMemoryAccess(StoreA2)));
+}
+
+TEST_F(MemorySSATest, Irreducible) {
+  // Create the equivalent of
+  // x = something
+  // if (...)
+  //    goto second_loop_entry
+  // while (...) {
+  // second_loop_entry:
+  // }
+  // use(x)
+
+  SmallVector<PHINode *, 8> Inserted;
+  IRBuilder<> B(C);
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+
+  // Make blocks
+  BasicBlock *IfBB = BasicBlock::Create(C, "if", F);
+  BasicBlock *LoopStartBB = BasicBlock::Create(C, "loopstart", F);
+  BasicBlock *LoopMainBB = BasicBlock::Create(C, "loopmain", F);
+  BasicBlock *AfterLoopBB = BasicBlock::Create(C, "afterloop", F);
+  B.SetInsertPoint(IfBB);
+  B.CreateCondBr(B.getTrue(), LoopMainBB, LoopStartBB);
+  B.SetInsertPoint(LoopStartBB);
+  B.CreateBr(LoopMainBB);
+  B.SetInsertPoint(LoopMainBB);
+  B.CreateCondBr(B.getTrue(), LoopStartBB, AfterLoopBB);
+  B.SetInsertPoint(AfterLoopBB);
+  Argument *FirstArg = &*F->arg_begin();
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAUpdater Updater(&MSSA);
+  // Create the load memory access
+  LoadInst *LoadInst = B.CreateLoad(FirstArg);
+  MemoryUse *LoadAccess = cast<MemoryUse>(Updater.createMemoryAccessInBB(
+      LoadInst, nullptr, AfterLoopBB, MemorySSA::Beginning));
+  Updater.insertUse(LoadAccess);
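+  // insertUse must hook the new use up to a defining access even though the
+  // loop above is irreducible; verifyMemorySSA below checks the result.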
+  MSSA.verifyMemorySSA();
+}
+
+TEST_F(MemorySSATest, MoveToBeforeLiveOnEntryInvalidatesCache) {
+  // Create:
+  //   %1 = alloca i8
+  //   ; 1 = MemoryDef(liveOnEntry)
+  //   store i8 0, i8* %1
+  //   ; 2 = MemoryDef(1)
+  //   store i8 0, i8* %1
+  //
+  // ...And be sure that MSSA's caching doesn't give us `1` for the clobber of
+  // `2` after `1` is removed.
+  IRBuilder<> B(C);
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+
+  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
+  B.SetInsertPoint(Entry);
+
+  Value *A = B.CreateAlloca(B.getInt8Ty());
+  StoreInst *StoreA = B.CreateStore(B.getInt8(0), A);
+  StoreInst *StoreB = B.CreateStore(B.getInt8(0), A);
+
+  setupAnalyses();
+
+  MemorySSA &MSSA = *Analyses->MSSA;
+
+  auto *DefA = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
+  auto *DefB = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
+
+  MemoryAccess *BClobber = MSSA.getWalker()->getClobberingMemoryAccess(DefB);
+  ASSERT_EQ(DefA, BClobber);
+
+  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefA);
+  StoreA->eraseFromParent();
+
+  EXPECT_EQ(DefB->getDefiningAccess(), MSSA.getLiveOnEntryDef());
+
+  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefB),
+            MSSA.getLiveOnEntryDef())
+      << "(DefA = " << DefA << ")";
+}
+
+TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
+  // Create:
+  //   %x = alloca i8
+  //   %y = alloca i8
+  //   ; 1 = MemoryDef(liveOnEntry)
+  //   store i8 0, i8* %x
+  //   ; 2 = MemoryDef(1)
+  //   store i8 0, i8* %y
+  //   ; 3 = MemoryDef(2)
+  //   store i8 0, i8* %x
+  //
+  // And be sure that MSSA's caching handles the removal of def `1`
+  // appropriately.
+  IRBuilder<> B(C);
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+
+  BasicBlock *Entry = BasicBlock::Create(C, "if", F);
+  B.SetInsertPoint(Entry);
+
+  Value *X = B.CreateAlloca(B.getInt8Ty());
+  Value *Y = B.CreateAlloca(B.getInt8Ty());
+  StoreInst *StoreX1 = B.CreateStore(B.getInt8(0), X);
+  StoreInst *StoreY = B.CreateStore(B.getInt8(0), Y);
+  StoreInst *StoreX2 = B.CreateStore(B.getInt8(0), X);
+
+  setupAnalyses();
+
+  MemorySSA &MSSA = *Analyses->MSSA;
+
+  auto *DefX1 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX1));
+  auto *DefY = cast<MemoryDef>(MSSA.getMemoryAccess(StoreY));
+  auto *DefX2 = cast<MemoryDef>(MSSA.getMemoryAccess(StoreX2));
+
+  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
+  MemoryAccess *X2Clobber = MSSA.getWalker()->getClobberingMemoryAccess(DefX2);
+  ASSERT_EQ(DefX1, X2Clobber);
+
+  MemorySSAUpdater(&MSSA).removeMemoryAccess(DefX1);
+  StoreX1->eraseFromParent();
+
+  EXPECT_EQ(DefX2->getDefiningAccess(), DefY);
+  EXPECT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(DefX2),
+            MSSA.getLiveOnEntryDef())
+      << "(DefX1 = " << DefX1 << ")";
+}
+
+// Test Must alias for optimized uses
+TEST_F(MemorySSATest, TestLoadMustAlias) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
+
+  B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
+  // Check load from LOE
+  LoadInst *LA1 = B.CreateLoad(AllocaA, "");
+  // Check load alias cached for second load
+  LoadInst *LA2 = B.CreateLoad(AllocaA, "");
+
+  B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
+  // Check load from store/def
+  LoadInst *LA3 = B.CreateLoad(AllocaA, "");
+  // Check load alias cached for second load
+  LoadInst *LA4 = B.CreateLoad(AllocaA, "");
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+
+  unsigned I = 0;
+  for (LoadInst *V : {LA1, LA2}) {
+    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
+    EXPECT_EQ(MemUse->getOptimizedAccessType(), None)
+        << "Load " << I << " doesn't have the correct alias information";
+    // Keep the increment out here: anything streamed into the EXPECT_EQ
+    // message above is only evaluated when the expectation fails.
+    ++I;
+  }
+  for (LoadInst *V : {LA3, LA4}) {
+    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
+    EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
+        << "Load " << I << " doesn't have the correct alias information";
+    // Keep the increment out here: anything streamed into the EXPECT_EQ
+    // message above is only evaluated when the expectation fails.
+    ++I;
+  }
+}
+
+// Test Must alias for optimized defs.
+TEST_F(MemorySSATest, TestStoreMustAlias) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
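+  // Each store below must-aliases the previous store to the same alloca; the
+  // first store to each alloca is clobbered only by liveOnEntry, so its
+  // optimized access type is expected to be None.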
+  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
+  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
+  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaA);
+  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaB);
+  StoreInst *SA3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaA);
+  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 3), AllocaB);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  unsigned I = 0;
+  for (StoreInst *V : {SA1, SB1, SA2, SB2, SA3, SB3}) {
+    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
+    EXPECT_EQ(MemDef->isOptimized(), false)
+        << "Store " << I << " is optimized from the start?";
+    EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
+        << "Store " << I
+        << " has correct alias information before being optimized?";
+    if (V == SA1)
+      Walker->getClobberingMemoryAccess(V);
+    else {
+      MemoryAccess *Def = MemDef->getDefiningAccess();
+      MemoryAccess *Clob = Walker->getClobberingMemoryAccess(V);
+      EXPECT_NE(Def, Clob) << "Store " << I
+                           << " has Defining Access equal to Clobbering Access";
+    }
+    EXPECT_EQ(MemDef->isOptimized(), true)
+        << "Store " << I << " was not optimized";
+    if (I == 0 || I == 1)
+      EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
+          << "Store " << I << " doesn't have the correct alias information";
+    else
+      EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
+          << "Store " << I << " doesn't have the correct alias information";
+    // Keep the increment out here: anything streamed into the EXPECT_EQ
+    // message above is only evaluated when the expectation fails.
+    ++I;
+  }
+}
+
+// Test May alias for optimized uses.
+TEST_F(MemorySSATest, TestLoadMayAlias) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(),
+                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
+                                         false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  auto *ArgIt = F->arg_begin();
+  Argument *PointerA = &*ArgIt;
+  Argument *PointerB = &*(++ArgIt);
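+  // LA1 and LB1 are preceded only by a store to the other (possibly aliasing)
+  // argument pointer, so they should be optimized as MayAlias; LA2 and LB2
+  // follow a store to the same pointer, so they should be MustAlias.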
+  B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
+  LoadInst *LA1 = B.CreateLoad(PointerA, "");
+  B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
+  LoadInst *LB1 = B.CreateLoad(PointerB, "");
+  B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
+  LoadInst *LA2 = B.CreateLoad(PointerA, "");
+  B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
+  LoadInst *LB2 = B.CreateLoad(PointerB, "");
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+
+  unsigned I = 0;
+  for (LoadInst *V : {LA1, LB1}) {
+    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
+    EXPECT_EQ(MemUse->getOptimizedAccessType(), MayAlias)
+        << "Load " << I << " doesn't have the correct alias information";
+    // Keep the increment out here: anything streamed into the EXPECT_EQ
+    // message above is only evaluated when the expectation fails.
+    ++I;
+  }
+  for (LoadInst *V : {LA2, LB2}) {
+    MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
+    EXPECT_EQ(MemUse->getOptimizedAccessType(), MustAlias)
+        << "Load " << I << " doesn't have the correct alias information";
+    // Keep the increment out here: anything streamed into the EXPECT_EQ
+    // message above is only evaluated when the expectation fails.
+    ++I;
+  }
+}
+
+// Test May alias for optimized defs.
+TEST_F(MemorySSATest, TestStoreMayAlias) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(),
+                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
+                                         false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  auto *ArgIt = F->arg_begin();
+  Argument *PointerA = &*ArgIt;
+  Argument *PointerB = &*(++ArgIt);
+  Value *AllocaC = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "C");
+  // Store into arg1; clobbered only by LOE, so optimized access type => None
+  StoreInst *SA1 = B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
+  // Store into arg2, may alias store to arg1 => may
+  StoreInst *SB1 = B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
+  // Store into alloca; no alias with args, clobbered only by LOE => None
+  StoreInst *SC1 = B.CreateStore(ConstantInt::get(Int8, 2), AllocaC);
+  // Store into arg1, may alias store to arg2 => may
+  StoreInst *SA2 = B.CreateStore(ConstantInt::get(Int8, 3), PointerA);
+  // Store into arg2, may alias store to arg1 => may
+  StoreInst *SB2 = B.CreateStore(ConstantInt::get(Int8, 4), PointerB);
+  // Store into alloca, no alias with args, so must alias SC1 => must
+  StoreInst *SC2 = B.CreateStore(ConstantInt::get(Int8, 5), AllocaC);
+  // Store into arg2, must alias store to arg2 => must
+  StoreInst *SB3 = B.CreateStore(ConstantInt::get(Int8, 6), PointerB);
+  std::initializer_list<StoreInst *> Sts = {SA1, SB1, SC1, SA2, SB2, SC2, SB3};
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  unsigned I = 0;
+  for (StoreInst *V : Sts) {
+    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
+    EXPECT_EQ(MemDef->isOptimized(), false)
+        << "Store " << I << " is optimized from the start?";
+    EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
+        << "Store " << I
+        << " has correct alias information before being optimized?";
+    ++I;
+  }
+
+  for (StoreInst *V : Sts)
+    Walker->getClobberingMemoryAccess(V);
+
+  I = 0;
+  for (StoreInst *V : Sts) {
+    MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
+    EXPECT_EQ(MemDef->isOptimized(), true)
+        << "Store " << I << " was not optimized";
+    if (I == 1 || I == 3 || I == 4)
+      EXPECT_EQ(MemDef->getOptimizedAccessType(), MayAlias)
+          << "Store " << I << " doesn't have the correct alias information";
+    else if (I == 0 || I == 2)
+      EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
+          << "Store " << I << " doesn't have the correct alias information";
+    else
+      EXPECT_EQ(MemDef->getOptimizedAccessType(), MustAlias)
+          << "Store " << I << " doesn't have the correct alias information";
+    // Keep the increment out here: anything streamed into the EXPECT_EQ
+    // message above is only evaluated when the expectation fails.
+    ++I;
+  }
+}
+
+TEST_F(MemorySSATest, LifetimeMarkersAreClobbers) {
+  // Example code:
+  // define void @a(i8* %foo) {
+  //   %bar = getelementptr i8, i8* %foo, i64 1
+  //   store i8 0, i8* %foo
+  //   store i8 0, i8* %bar
+  //   call void @llvm.lifetime.end.p0i8(i64 2, i8* %foo)
+  //   call void @llvm.lifetime.start.p0i8(i64 2, i8* %foo)
+  //   store i8 0, i8* %foo
+  //   store i8 0, i8* %bar
+  //   ret void
+  // }
+  //
+  // Patterns like this are possible after inlining; the stores to %foo and %bar
+  // should both be clobbered by the lifetime.start call if they're dominated by
+  // it.
+
+  IRBuilder<> B(C);
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+
+  // Make blocks
+  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
+
+  B.SetInsertPoint(Entry);
+  Value *Foo = &*F->arg_begin();
+
+  Value *Bar = B.CreateGEP(Foo, B.getInt64(1), "bar");
+
+  B.CreateStore(B.getInt8(0), Foo);
+  B.CreateStore(B.getInt8(0), Bar);
+
+  auto GetLifetimeIntrinsic = [&](Intrinsic::ID ID) {
+    return Intrinsic::getDeclaration(&M, ID, {Foo->getType()});
+  };
+
+  B.CreateCall(GetLifetimeIntrinsic(Intrinsic::lifetime_end),
+               {B.getInt64(2), Foo});
+  Instruction *LifetimeStart = B.CreateCall(
+      GetLifetimeIntrinsic(Intrinsic::lifetime_start), {B.getInt64(2), Foo});
+
+  Instruction *FooStore = B.CreateStore(B.getInt8(0), Foo);
+  Instruction *BarStore = B.CreateStore(B.getInt8(0), Bar);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+
+  MemoryAccess *LifetimeStartAccess = MSSA.getMemoryAccess(LifetimeStart);
+  ASSERT_NE(LifetimeStartAccess, nullptr);
+
+  MemoryAccess *FooAccess = MSSA.getMemoryAccess(FooStore);
+  ASSERT_NE(FooAccess, nullptr);
+
+  MemoryAccess *BarAccess = MSSA.getMemoryAccess(BarStore);
+  ASSERT_NE(BarAccess, nullptr);
+
+  MemoryAccess *FooClobber =
+      MSSA.getWalker()->getClobberingMemoryAccess(FooAccess);
+  EXPECT_EQ(FooClobber, LifetimeStartAccess);
+
+  MemoryAccess *BarClobber =
+      MSSA.getWalker()->getClobberingMemoryAccess(BarAccess);
+  EXPECT_EQ(BarClobber, LifetimeStartAccess);
+}
+
+TEST_F(MemorySSATest, DefOptimizationsAreInvalidatedOnMoving) {
+  IRBuilder<> B(C);
+  F = Function::Create(FunctionType::get(B.getVoidTy(), {B.getInt1Ty()}, false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+
+  // Make a CFG like
+  //     entry
+  //      / \
+  //     a   b
+  //      \ /
+  //       c
+  //
+  // Put a def in A and a def in B, move the def from A -> B, observe as the
+  // optimization is invalidated.
+  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
+  BasicBlock *BlockA = BasicBlock::Create(C, "a", F);
+  BasicBlock *BlockB = BasicBlock::Create(C, "b", F);
+  BasicBlock *BlockC = BasicBlock::Create(C, "c", F);
+
+  B.SetInsertPoint(Entry);
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "alloc");
+  StoreInst *StoreEntry = B.CreateStore(B.getInt8(0), Alloca);
+  B.CreateCondBr(B.getTrue(), BlockA, BlockB);
+
+  B.SetInsertPoint(BlockA);
+  StoreInst *StoreA = B.CreateStore(B.getInt8(1), Alloca);
+  B.CreateBr(BlockC);
+
+  B.SetInsertPoint(BlockB);
+  StoreInst *StoreB = B.CreateStore(B.getInt8(2), Alloca);
+  B.CreateBr(BlockC);
+
+  B.SetInsertPoint(BlockC);
+  B.CreateUnreachable();
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+
+  auto *AccessEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreEntry));
+  auto *StoreAEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
+  auto *StoreBEntry = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
+
+  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
+            AccessEntry);
+  ASSERT_TRUE(StoreAEntry->isOptimized());
+
+  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreBEntry),
+            AccessEntry);
+  ASSERT_TRUE(StoreBEntry->isOptimized());
+
+  // Note that if we did InsertionPlace::Beginning, we don't go out of our way
+  // to invalidate the cache for StoreBEntry. If the user wants to actually do
+  // moves like these, it's up to them to ensure that nearby cache entries are
+  // correctly invalidated (which, in general, requires walking all the
+  // instructions that the moved instruction dominates). So we probably
+  // shouldn't be doing moves like this in general. Still, works as a test. ;)
+  MemorySSAUpdater(&MSSA).moveToPlace(StoreAEntry, BlockB,
+                                      MemorySSA::InsertionPlace::End);
+  ASSERT_FALSE(StoreAEntry->isOptimized());
+  ASSERT_EQ(MSSA.getWalker()->getClobberingMemoryAccess(StoreAEntry),
+            StoreBEntry);
+}
+
+TEST_F(MemorySSATest, TestOptimizedDefsAreProperUses) {
+  F = Function::Create(FunctionType::get(B.getVoidTy(),
+                                         {B.getInt8PtrTy(), B.getInt8PtrTy()},
+                                         false),
+                       GlobalValue::ExternalLinkage, "F", &M);
+  B.SetInsertPoint(BasicBlock::Create(C, "", F));
+  Type *Int8 = Type::getInt8Ty(C);
+  Value *AllocA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  Value *AllocB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
+
+  StoreInst *StoreA = B.CreateStore(ConstantInt::get(Int8, 0), AllocA);
+  StoreInst *StoreB = B.CreateStore(ConstantInt::get(Int8, 1), AllocB);
+  StoreInst *StoreA2 = B.CreateStore(ConstantInt::get(Int8, 2), AllocA);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+
+  // If these don't hold, there's no chance of the test result being useful.
+  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA),
+            MSSA.getLiveOnEntryDef());
+  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreB),
+            MSSA.getLiveOnEntryDef());
+  auto *StoreAAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA));
+  auto *StoreA2Access = cast<MemoryDef>(MSSA.getMemoryAccess(StoreA2));
+  ASSERT_EQ(Walker->getClobberingMemoryAccess(StoreA2), StoreAAccess);
+  ASSERT_EQ(StoreA2Access->getOptimized(), StoreAAccess);
+
+  auto *StoreBAccess = cast<MemoryDef>(MSSA.getMemoryAccess(StoreB));
+  ASSERT_LT(StoreAAccess->getID(), StoreBAccess->getID());
+  ASSERT_LT(StoreBAccess->getID(), StoreA2Access->getID());
+
+  auto SortVecByID = [](std::vector<const MemoryDef *> &Defs) {
+    llvm::sort(Defs, [](const MemoryDef *LHS, const MemoryDef *RHS) {
+      return LHS->getID() < RHS->getID();
+    });
+  };
+
+  auto SortedUserList = [&](const MemoryDef *MD) {
+    std::vector<const MemoryDef *> Result;
+    transform(MD->users(), std::back_inserter(Result),
+              [](const User *U) { return cast<MemoryDef>(U); });
+    SortVecByID(Result);
+    return Result;
+  };
+
+  // Use std::vectors, since they have nice pretty-printing if the test fails.
+  // Parens are necessary because EXPECT_EQ is a macro, and we have commas in
+  // our init lists...
+  EXPECT_EQ(SortedUserList(StoreAAccess),
+            (std::vector<const MemoryDef *>{StoreBAccess, StoreA2Access}));
+
+  EXPECT_EQ(SortedUserList(StoreBAccess),
+            std::vector<const MemoryDef *>{StoreA2Access});
+
+  // StoreAAccess should be present twice, since it uses liveOnEntry for both
+  // its defining and optimized accesses. This is a bit awkward, and is not
+  // relied upon anywhere at the moment. If this is painful, we can fix it.
+  EXPECT_EQ(SortedUserList(cast<MemoryDef>(MSSA.getLiveOnEntryDef())),
+            (std::vector<const MemoryDef *>{StoreAAccess, StoreAAccess,
+                                            StoreBAccess}));
+}
+
+//   entry
+//     |
+//   header
+//    / \
+// body  |
+//    \ /
+//    exit
+// header:
+//  ; 1 = MemoryDef(liveOnEntry)
+// body:
+//  ; 2 = MemoryDef(1)
+// exit:
+//  ; 3 = MemoryPhi({body, 2}, {header, 1})
+//  ; 4 = MemoryDef(3); optimized to 3, cannot optimize through the phi.
+//  Insert edge: entry -> exit, check the MSSA update is correct.
+TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiNotOpt) {
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  Argument *PointerArg = &*F->arg_begin();
+  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
+  BasicBlock *Header(BasicBlock::Create(C, "header", F));
+  BasicBlock *Body(BasicBlock::Create(C, "body", F));
+  BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
+  B.SetInsertPoint(Entry);
+  BranchInst::Create(Header, Entry);
+  B.SetInsertPoint(Header);
+  B.CreateStore(B.getInt8(16), PointerArg);
+  B.CreateCondBr(B.getTrue(), Exit, Body);
+  B.SetInsertPoint(Body);
+  B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(Exit, Body);
+  B.SetInsertPoint(Exit);
+  StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+  std::unique_ptr<MemorySSAUpdater> MSSAU =
+      make_unique<MemorySSAUpdater>(&MSSA);
+
+  MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
+  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
+
+  // Alter CFG, add edge: entry -> exit
+  Entry->getTerminator()->eraseFromParent();
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), Header, Exit);
+  SmallVector<CFGUpdate, 1> Updates;
+  Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
+  Analyses->DT.applyUpdates(Updates);
+  MSSAU->applyInsertUpdates(Updates, Analyses->DT);
+  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S1));
+}
+
+//   entry
+//     |
+//   header
+//    / \
+// body  |
+//    \ /
+//    exit
+// header:
+//  ; 1 = MemoryDef(liveOnEntry)
+// body:
+//  ; 2 = MemoryDef(1)
+// exit:
+//  ; 3 = MemoryPhi({body, 2}, {header, 1})
+//  ; 4 = MemoryDef(3); optimize this to 1 now, added edge should invalidate
+//  the optimized access.
+//  Insert edge: entry -> exit, check the MSSA update is correct.
+TEST_F(MemorySSATest, TestAddedEdgeToBlockWithPhiOpt) {
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  Argument *PointerArg = &*F->arg_begin();
+  Type *Int8 = Type::getInt8Ty(C);
+  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
+  BasicBlock *Header(BasicBlock::Create(C, "header", F));
+  BasicBlock *Body(BasicBlock::Create(C, "body", F));
+  BasicBlock *Exit(BasicBlock::Create(C, "exit", F));
+
+  B.SetInsertPoint(Entry);
+  Value *Alloca = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
+  BranchInst::Create(Header, Entry);
+
+  B.SetInsertPoint(Header);
+  StoreInst *S1 = B.CreateStore(B.getInt8(16), PointerArg);
+  B.CreateCondBr(B.getTrue(), Exit, Body);
+
+  B.SetInsertPoint(Body);
+  B.CreateStore(ConstantInt::get(Int8, 0), Alloca);
+  BranchInst::Create(Exit, Body);
+
+  B.SetInsertPoint(Exit);
+  StoreInst *S2 = B.CreateStore(B.getInt8(16), PointerArg);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  MemorySSAWalker *Walker = Analyses->Walker;
+  std::unique_ptr<MemorySSAUpdater> MSSAU =
+      make_unique<MemorySSAUpdater>(&MSSA);
+
+  MemoryDef *DefS1 = cast<MemoryDef>(MSSA.getMemoryAccess(S1));
+  EXPECT_EQ(DefS1, Walker->getClobberingMemoryAccess(S2));
+
+  // Alter CFG, add edge: entry -> exit
+  Entry->getTerminator()->eraseFromParent();
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), Header, Exit);
+  SmallVector<CFGUpdate, 1> Updates;
+  Updates.push_back({cfg::UpdateKind::Insert, Entry, Exit});
+  Analyses->DT.applyUpdates(Updates);
+  MSSAU->applyInsertUpdates(Updates, Analyses->DT);
+
+  MemoryPhi *Phi = MSSA.getMemoryAccess(Exit);
+  EXPECT_EQ(Phi, Walker->getClobberingMemoryAccess(S2));
+}
+
+//   entry
+//    /  |
+//   a   |
+//  / \  |
+//  b c  f
+//  \ /  |
+//   d   |
+//    \ /
+//     e
+// f:
+//  ; 1 = MemoryDef(liveOnEntry)
+// e:
+//  ; 2 = MemoryPhi({d, liveOnEntry}, {f, 1})
+//
+// Insert edge: f -> c, check update is correct.
+// After update:
+// f:
+//  ; 1 = MemoryDef(liveOnEntry)
+// c:
+//  ; 3 = MemoryPhi({a, liveOnEntry}, {f, 1})
+// d:
+//  ; 4 = MemoryPhi({b, liveOnEntry}, {c, 3})
+// e:
+//  ; 2 = MemoryPhi({d, 4}, {f, 1})
+TEST_F(MemorySSATest, TestAddedEdgeToBlockWithNoPhiAddNewPhis) {
+  F = Function::Create(
+      FunctionType::get(B.getVoidTy(), {B.getInt8PtrTy()}, false),
+      GlobalValue::ExternalLinkage, "F", &M);
+  Argument *PointerArg = &*F->arg_begin();
+  BasicBlock *Entry(BasicBlock::Create(C, "entry", F));
+  BasicBlock *ABlock(BasicBlock::Create(C, "a", F));
+  BasicBlock *BBlock(BasicBlock::Create(C, "b", F));
+  BasicBlock *CBlock(BasicBlock::Create(C, "c", F));
+  BasicBlock *DBlock(BasicBlock::Create(C, "d", F));
+  BasicBlock *EBlock(BasicBlock::Create(C, "e", F));
+  BasicBlock *FBlock(BasicBlock::Create(C, "f", F));
+
+  B.SetInsertPoint(Entry);
+  B.CreateCondBr(B.getTrue(), ABlock, FBlock);
+  B.SetInsertPoint(ABlock);
+  B.CreateCondBr(B.getTrue(), BBlock, CBlock);
+  B.SetInsertPoint(BBlock);
+  BranchInst::Create(DBlock, BBlock);
+  B.SetInsertPoint(CBlock);
+  BranchInst::Create(DBlock, CBlock);
+  B.SetInsertPoint(DBlock);
+  BranchInst::Create(EBlock, DBlock);
+  B.SetInsertPoint(FBlock);
+  B.CreateStore(B.getInt8(16), PointerArg);
+  BranchInst::Create(EBlock, FBlock);
+
+  setupAnalyses();
+  MemorySSA &MSSA = *Analyses->MSSA;
+  std::unique_ptr<MemorySSAUpdater> MSSAU =
+      make_unique<MemorySSAUpdater>(&MSSA);
+
+  // Alter CFG, add edge: f -> c
+  FBlock->getTerminator()->eraseFromParent();
+  B.SetInsertPoint(FBlock);
+  B.CreateCondBr(B.getTrue(), CBlock, EBlock);
+  SmallVector<CFGUpdate, 1> Updates;
+  Updates.push_back({cfg::UpdateKind::Insert, FBlock, CBlock});
+  Analyses->DT.applyUpdates(Updates);
+  MSSAU->applyInsertUpdates(Updates, Analyses->DT);
+
+  MemoryPhi *MPC = MSSA.getMemoryAccess(CBlock);
+  EXPECT_NE(MPC, nullptr);
+  MemoryPhi *MPD = MSSA.getMemoryAccess(DBlock);
+  EXPECT_NE(MPD, nullptr);
+  MemoryPhi *MPE = MSSA.getMemoryAccess(EBlock);
+  EXPECT_EQ(MPD, MPE->getIncomingValueForBlock(DBlock));
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/OrderedBasicBlockTest.cpp b/src/llvm-project/llvm/unittests/Analysis/OrderedBasicBlockTest.cpp
new file mode 100644
index 0000000..b8b9ff0
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/OrderedBasicBlockTest.cpp
@@ -0,0 +1,58 @@
+//===- OrderedBasicBlockTest.cpp - OrderedBasicBlock unit tests -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/OrderedBasicBlock.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+class OrderedBasicBlockTest : public testing::Test {
+protected:
+  LLVMContext C;
+
+  std::unique_ptr<Module> makeLLVMModule() {
+    const char *ModuleString = R"(define i32 @f(i32 %x) {
+                                    %add = add i32 %x, 42
+                                    ret i32 %add
+                                  })";
+    SMDiagnostic Err;
+    auto foo = parseAssemblyString(ModuleString, Err, C);
+    return foo;
+  }
+};
+
+TEST_F(OrderedBasicBlockTest, Basic) {
+  auto M = makeLLVMModule();
+  Function *F = M->getFunction("f");
+  BasicBlock::iterator I = F->front().begin();
+  Instruction *Add = &*I++;
+  Instruction *Ret = &*I++;
+
+  OrderedBasicBlock OBB(&F->front());
+  // Intentionally duplicated to verify cached and uncached are the same.
+  EXPECT_FALSE(OBB.dominates(Add, Add));
+  EXPECT_FALSE(OBB.dominates(Add, Add));
+  EXPECT_TRUE(OBB.dominates(Add, Ret));
+  EXPECT_TRUE(OBB.dominates(Add, Ret));
+  EXPECT_FALSE(OBB.dominates(Ret, Add));
+  EXPECT_FALSE(OBB.dominates(Ret, Add));
+  EXPECT_FALSE(OBB.dominates(Ret, Ret));
+  EXPECT_FALSE(OBB.dominates(Ret, Ret));
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/src/llvm-project/llvm/unittests/Analysis/OrderedInstructionsTest.cpp b/src/llvm-project/llvm/unittests/Analysis/OrderedInstructionsTest.cpp
new file mode 100644
index 0000000..dc1b25b
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/OrderedInstructionsTest.cpp
@@ -0,0 +1,65 @@
+//===- OrderedInstructionsTest.cpp - Unit tests for OrderedInstructions ---===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/OrderedInstructions.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+/// Check intra-basicblock and inter-basicblock dominance using
+/// OrderedInstruction.
+TEST(OrderedInstructionsTest, DominanceTest) {
+  LLVMContext Ctx;
+  Module M("test", Ctx);
+  IRBuilder<> B(Ctx);
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Ctx), {B.getInt8PtrTy()}, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+
+  // Create the function as follow and check for dominance relation.
+  //
+  // test():
+  //  bbx:
+  //    loadx;
+  //    loady;
+  //  bby:
+  //    loadz;
+  //    return;
+  //
+  // More specifically, check that loadx dominates loady, that loady does not
+  // dominate loadx, and that loady dominates loadz.
+  //
+  // Create BBX with 2 loads.
+  BasicBlock *BBX = BasicBlock::Create(Ctx, "bbx", F);
+  B.SetInsertPoint(BBX);
+  Argument *PointerArg = &*F->arg_begin();
+  LoadInst *LoadInstX = B.CreateLoad(PointerArg);
+  LoadInst *LoadInstY = B.CreateLoad(PointerArg);
+
+  // Create BBY with 1 load.
+  BasicBlock *BBY = BasicBlock::Create(Ctx, "bby", F);
+  B.SetInsertPoint(BBY);
+  LoadInst *LoadInstZ = B.CreateLoad(PointerArg);
+  B.CreateRet(LoadInstZ);
+  std::unique_ptr<DominatorTree> DT(new DominatorTree(*F));
+  OrderedInstructions OI(&*DT);
+
+  // Intra-BB dominance test.
+  EXPECT_TRUE(OI.dominates(LoadInstX, LoadInstY));
+  EXPECT_FALSE(OI.dominates(LoadInstY, LoadInstX));
+
+  // Inter-BB dominance test.
+  EXPECT_TRUE(OI.dominates(LoadInstY, LoadInstZ));
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/PhiValuesTest.cpp b/src/llvm-project/llvm/unittests/Analysis/PhiValuesTest.cpp
new file mode 100644
index 0000000..303b0df
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/PhiValuesTest.cpp
@@ -0,0 +1,208 @@
+//===- PhiValuesTest.cpp - PhiValues unit tests ---------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/PhiValues.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+TEST(PhiValuesTest, SimplePhi) {
+  LLVMContext C;
+  Module M("PhiValuesTest", C);
+
+  Type *VoidTy = Type::getVoidTy(C);
+  Type *I1Ty = Type::getInt1Ty(C);
+  Type *I32Ty = Type::getInt32Ty(C);
+  Type *I32PtrTy = Type::getInt32PtrTy(C);
+
+  // Create a function with phis that do not have other phis as incoming values
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FunctionType::get(VoidTy, false)));
+
+  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
+  BasicBlock *If = BasicBlock::Create(C, "if", F);
+  BasicBlock *Else = BasicBlock::Create(C, "else", F);
+  BasicBlock *Then = BasicBlock::Create(C, "then", F);
+  BranchInst::Create(If, Else, UndefValue::get(I1Ty), Entry);
+  BranchInst::Create(Then, If);
+  BranchInst::Create(Then, Else);
+
+  Value *Val1 = new LoadInst(UndefValue::get(I32PtrTy), "val1", Entry);
+  Value *Val2 = new LoadInst(UndefValue::get(I32PtrTy), "val2", Entry);
+  Value *Val3 = new LoadInst(UndefValue::get(I32PtrTy), "val3", Entry);
+  Value *Val4 = new LoadInst(UndefValue::get(I32PtrTy), "val4", Entry);
+
+  PHINode *Phi1 = PHINode::Create(I32Ty, 2, "phi1", Then);
+  Phi1->addIncoming(Val1, If);
+  Phi1->addIncoming(Val2, Else);
+  PHINode *Phi2 = PHINode::Create(I32Ty, 2, "phi2", Then);
+  Phi2->addIncoming(Val1, If);
+  Phi2->addIncoming(Val3, Else);
+
+  PhiValues PV(*F);
+  PhiValues::ValueSet Vals;
+
+  // Check that simple usage works
+  Vals = PV.getValuesForPhi(Phi1);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val1));
+  EXPECT_TRUE(Vals.count(Val2));
+  Vals = PV.getValuesForPhi(Phi2);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val1));
+  EXPECT_TRUE(Vals.count(Val3));
+
+  // Check that values are updated when one value is replaced with another
+  Val1->replaceAllUsesWith(Val4);
+  PV.invalidateValue(Val1);
+  Vals = PV.getValuesForPhi(Phi1);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val4));
+  EXPECT_TRUE(Vals.count(Val2));
+  Vals = PV.getValuesForPhi(Phi2);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val4));
+  EXPECT_TRUE(Vals.count(Val3));
+
+  // Check that setting an incoming value directly updates the values
+  Phi1->setIncomingValue(0, Val1);
+  PV.invalidateValue(Phi1);
+  Vals = PV.getValuesForPhi(Phi1);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val1));
+  EXPECT_TRUE(Vals.count(Val2));
+}
+
+TEST(PhiValuesTest, DependentPhi) {
+  LLVMContext C;
+  Module M("PhiValuesTest", C);
+
+  Type *VoidTy = Type::getVoidTy(C);
+  Type *I1Ty = Type::getInt1Ty(C);
+  Type *I32Ty = Type::getInt32Ty(C);
+  Type *I32PtrTy = Type::getInt32PtrTy(C);
+
+  // Create a function with a phi that has another phi as an incoming value
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FunctionType::get(VoidTy, false)));
+
+  BasicBlock *Entry = BasicBlock::Create(C, "entry", F);
+  BasicBlock *If1 = BasicBlock::Create(C, "if1", F);
+  BasicBlock *Else1 = BasicBlock::Create(C, "else1", F);
+  BasicBlock *Then = BasicBlock::Create(C, "then", F);
+  BasicBlock *If2 = BasicBlock::Create(C, "if2", F);
+  BasicBlock *Else2 = BasicBlock::Create(C, "else2", F);
+  BasicBlock *End = BasicBlock::Create(C, "then", F);
+  BranchInst::Create(If1, Else1, UndefValue::get(I1Ty), Entry);
+  BranchInst::Create(Then, If1);
+  BranchInst::Create(Then, Else1);
+  BranchInst::Create(If2, Else2, UndefValue::get(I1Ty), Then);
+  BranchInst::Create(End, If2);
+  BranchInst::Create(End, Else2);
+
+  Value *Val1 = new LoadInst(UndefValue::get(I32PtrTy), "val1", Entry);
+  Value *Val2 = new LoadInst(UndefValue::get(I32PtrTy), "val2", Entry);
+  Value *Val3 = new LoadInst(UndefValue::get(I32PtrTy), "val3", Entry);
+  Value *Val4 = new LoadInst(UndefValue::get(I32PtrTy), "val4", Entry);
+
+  PHINode *Phi1 = PHINode::Create(I32Ty, 2, "phi1", Then);
+  Phi1->addIncoming(Val1, If1);
+  Phi1->addIncoming(Val2, Else1);
+  PHINode *Phi2 = PHINode::Create(I32Ty, 2, "phi2", Then);
+  Phi2->addIncoming(Val2, If1);
+  Phi2->addIncoming(Val3, Else1);
+  PHINode *Phi3 = PHINode::Create(I32Ty, 2, "phi3", End);
+  Phi3->addIncoming(Phi1, If2);
+  Phi3->addIncoming(Val3, Else2);
+
+  PhiValues PV(*F);
+  PhiValues::ValueSet Vals;
+
+  // Check that simple usage works
+  Vals = PV.getValuesForPhi(Phi1);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val1));
+  EXPECT_TRUE(Vals.count(Val2));
+  Vals = PV.getValuesForPhi(Phi2);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val2));
+  EXPECT_TRUE(Vals.count(Val3));
+  Vals = PV.getValuesForPhi(Phi3);
+  EXPECT_EQ(Vals.size(), 3u);
+  EXPECT_TRUE(Vals.count(Val1));
+  EXPECT_TRUE(Vals.count(Val2));
+  EXPECT_TRUE(Vals.count(Val3));
+
+  // Check that changing an incoming value of a phi updates phis that depend on it
+  Phi1->setIncomingValue(0, Val4);
+  PV.invalidateValue(Phi1);
+  Vals = PV.getValuesForPhi(Phi1);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val4));
+  EXPECT_TRUE(Vals.count(Val2));
+  Vals = PV.getValuesForPhi(Phi2);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val2));
+  EXPECT_TRUE(Vals.count(Val3));
+  Vals = PV.getValuesForPhi(Phi3);
+  EXPECT_EQ(Vals.size(), 3u);
+  EXPECT_TRUE(Vals.count(Val4));
+  EXPECT_TRUE(Vals.count(Val2));
+  EXPECT_TRUE(Vals.count(Val3));
+
+  // Check that replacing an incoming phi with a value works
+  Phi3->setIncomingValue(0, Val1);
+  PV.invalidateValue(Phi3);
+  Vals = PV.getValuesForPhi(Phi1);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val4));
+  EXPECT_TRUE(Vals.count(Val2));
+  Vals = PV.getValuesForPhi(Phi2);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val2));
+  EXPECT_TRUE(Vals.count(Val3));
+  Vals = PV.getValuesForPhi(Phi3);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val1));
+  EXPECT_TRUE(Vals.count(Val3));
+
+  // Check that adding a phi as an incoming value works
+  Phi3->setIncomingValue(1, Phi2);
+  PV.invalidateValue(Phi3);
+  Vals = PV.getValuesForPhi(Phi1);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val4));
+  EXPECT_TRUE(Vals.count(Val2));
+  Vals = PV.getValuesForPhi(Phi2);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val2));
+  EXPECT_TRUE(Vals.count(Val3));
+  Vals = PV.getValuesForPhi(Phi3);
+  EXPECT_EQ(Vals.size(), 3u);
+  EXPECT_TRUE(Vals.count(Val1));
+  EXPECT_TRUE(Vals.count(Val2));
+  EXPECT_TRUE(Vals.count(Val3));
+
+  // Check that replacing an incoming phi then deleting it works
+  Phi3->setIncomingValue(1, Val2);
+  PV.invalidateValue(Phi2);
+  Phi2->eraseFromParent();
+  PV.invalidateValue(Phi3);
+  Vals = PV.getValuesForPhi(Phi1);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val4));
+  EXPECT_TRUE(Vals.count(Val2));
+  Vals = PV.getValuesForPhi(Phi3);
+  EXPECT_EQ(Vals.size(), 2u);
+  EXPECT_TRUE(Vals.count(Val1));
+  EXPECT_TRUE(Vals.count(Val2));
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp b/src/llvm-project/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp
new file mode 100644
index 0000000..95d8426
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/ProfileSummaryInfoTest.cpp
@@ -0,0 +1,215 @@
+//===- ProfileSummaryInfoTest.cpp - ProfileSummaryInfo unit tests ---------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/ProfileSummaryInfo.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+class ProfileSummaryInfoTest : public testing::Test {
+protected:
+  LLVMContext C;
+  std::unique_ptr<BranchProbabilityInfo> BPI;
+  std::unique_ptr<DominatorTree> DT;
+  std::unique_ptr<LoopInfo> LI;
+
+  ProfileSummaryInfo buildPSI(Module *M) {
+    return ProfileSummaryInfo(*M);
+  }
+  BlockFrequencyInfo buildBFI(Function &F) {
+    DT.reset(new DominatorTree(F));
+    LI.reset(new LoopInfo(*DT));
+    BPI.reset(new BranchProbabilityInfo(F, *LI));
+    return BlockFrequencyInfo(F, *BPI, *LI);
+  }
+  std::unique_ptr<Module> makeLLVMModule(const char *ProfKind = nullptr) {
+    const char *ModuleString =
+        "define i32 @g(i32 %x) !prof !21 {{\n"
+        "  ret i32 0\n"
+        "}\n"
+        "define i32 @h(i32 %x) !prof !22 {{\n"
+        "  ret i32 0\n"
+        "}\n"
+        "define i32 @f(i32 %x) !prof !20 {{\n"
+        "bb0:\n"
+        "  %y1 = icmp eq i32 %x, 0 \n"
+        "  br i1 %y1, label %bb1, label %bb2, !prof !23 \n"
+        "bb1:\n"
+        "  %z1 = call i32 @g(i32 %x)\n"
+        "  br label %bb3\n"
+        "bb2:\n"
+        "  %z2 = call i32 @h(i32 %x)\n"
+        "  br label %bb3\n"
+        "bb3:\n"
+        "  %y2 = phi i32 [0, %bb1], [1, %bb2] \n"
+        "  ret i32 %y2\n"
+        "}\n"
+        "!20 = !{{!\"function_entry_count\", i64 400}\n"
+        "!21 = !{{!\"function_entry_count\", i64 1}\n"
+        "!22 = !{{!\"function_entry_count\", i64 100}\n"
+        "!23 = !{{!\"branch_weights\", i32 64, i32 4}\n"
+        "{0}";
+    const char *SummaryString = "!llvm.module.flags = !{{!1}"
+                                "!1 = !{{i32 1, !\"ProfileSummary\", !2}"
+                                "!2 = !{{!3, !4, !5, !6, !7, !8, !9, !10}"
+                                "!3 = !{{!\"ProfileFormat\", !\"{0}\"}"
+                                "!4 = !{{!\"TotalCount\", i64 10000}"
+                                "!5 = !{{!\"MaxCount\", i64 10}"
+                                "!6 = !{{!\"MaxInternalCount\", i64 1}"
+                                "!7 = !{{!\"MaxFunctionCount\", i64 1000}"
+                                "!8 = !{{!\"NumCounts\", i64 3}"
+                                "!9 = !{{!\"NumFunctions\", i64 3}"
+                                "!10 = !{{!\"DetailedSummary\", !11}"
+                                "!11 = !{{!12, !13, !14}"
+                                "!12 = !{{i32 10000, i64 1000, i32 1}"
+                                "!13 = !{{i32 999000, i64 300, i32 3}"
+                                "!14 = !{{i32 999999, i64 5, i32 10}";
+    SMDiagnostic Err;
+    if (ProfKind)
+      return parseAssemblyString(
+          formatv(ModuleString, formatv(SummaryString, ProfKind).str()).str(),
+          Err, C);
+    else
+      return parseAssemblyString(formatv(ModuleString, "").str(), Err, C);
+  }
+};
+
+TEST_F(ProfileSummaryInfoTest, TestNoProfile) {
+  auto M = makeLLVMModule(/*ProfKind=*/nullptr);
+  Function *F = M->getFunction("f");
+
+  ProfileSummaryInfo PSI = buildPSI(M.get());
+  EXPECT_FALSE(PSI.hasProfileSummary());
+  EXPECT_FALSE(PSI.hasSampleProfile());
+  EXPECT_FALSE(PSI.hasInstrumentationProfile());
+  // In the absence of profiles, is{Hot|Cold}X methods should always return
+  // false.
+  EXPECT_FALSE(PSI.isHotCount(1000));
+  EXPECT_FALSE(PSI.isHotCount(0));
+  EXPECT_FALSE(PSI.isColdCount(1000));
+  EXPECT_FALSE(PSI.isColdCount(0));
+
+  EXPECT_FALSE(PSI.isFunctionEntryHot(F));
+  EXPECT_FALSE(PSI.isFunctionEntryCold(F));
+
+  BasicBlock &BB0 = F->getEntryBlock();
+  BasicBlock *BB1 = BB0.getTerminator()->getSuccessor(0);
+
+  BlockFrequencyInfo BFI = buildBFI(*F);
+  EXPECT_FALSE(PSI.isHotBlock(&BB0, &BFI));
+  EXPECT_FALSE(PSI.isColdBlock(&BB0, &BFI));
+
+  CallSite CS1(BB1->getFirstNonPHI());
+  EXPECT_FALSE(PSI.isHotCallSite(CS1, &BFI));
+  EXPECT_FALSE(PSI.isColdCallSite(CS1, &BFI));
+}
+
+TEST_F(ProfileSummaryInfoTest, TestCommon) {
+  auto M = makeLLVMModule("InstrProf");
+  Function *F = M->getFunction("f");
+  Function *G = M->getFunction("g");
+  Function *H = M->getFunction("h");
+
+  ProfileSummaryInfo PSI = buildPSI(M.get());
+  EXPECT_TRUE(PSI.hasProfileSummary());
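+  // With the DetailedSummary above, and assuming the default PSI cutoffs
+  // (99% hot, 99.9999% cold), the hot-count threshold works out to 300 and
+  // the cold-count threshold to 5, which explains the expectations below.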
+  EXPECT_TRUE(PSI.isHotCount(400));
+  EXPECT_TRUE(PSI.isColdCount(2));
+  EXPECT_FALSE(PSI.isColdCount(100));
+  EXPECT_FALSE(PSI.isHotCount(100));
+
+  EXPECT_TRUE(PSI.isFunctionEntryHot(F));
+  EXPECT_FALSE(PSI.isFunctionEntryHot(G));
+  EXPECT_FALSE(PSI.isFunctionEntryHot(H));
+}
+
+TEST_F(ProfileSummaryInfoTest, InstrProf) {
+  auto M = makeLLVMModule("InstrProf");
+  Function *F = M->getFunction("f");
+  ProfileSummaryInfo PSI = buildPSI(M.get());
+  EXPECT_TRUE(PSI.hasProfileSummary());
+  EXPECT_TRUE(PSI.hasInstrumentationProfile());
+
+  BasicBlock &BB0 = F->getEntryBlock();
+  BasicBlock *BB1 = BB0.getTerminator()->getSuccessor(0);
+  BasicBlock *BB2 = BB0.getTerminator()->getSuccessor(1);
+  BasicBlock *BB3 = BB1->getSingleSuccessor();
+
+  BlockFrequencyInfo BFI = buildBFI(*F);
+  EXPECT_TRUE(PSI.isHotBlock(&BB0, &BFI));
+  EXPECT_TRUE(PSI.isHotBlock(BB1, &BFI));
+  EXPECT_FALSE(PSI.isHotBlock(BB2, &BFI));
+  EXPECT_TRUE(PSI.isHotBlock(BB3, &BFI));
+
+  CallSite CS1(BB1->getFirstNonPHI());
+  auto *CI2 = BB2->getFirstNonPHI();
+  CallSite CS2(CI2);
+
+  EXPECT_TRUE(PSI.isHotCallSite(CS1, &BFI));
+  EXPECT_FALSE(PSI.isHotCallSite(CS2, &BFI));
+
+  // Test that attaching MD_prof metadata with a hot count to CS2 does not
+  // change its hotness, since call-site metadata has no effect with
+  // instrumentation profiles.
+  MDBuilder MDB(M->getContext());
+  CI2->setMetadata(llvm::LLVMContext::MD_prof, MDB.createBranchWeights({400}));
+  EXPECT_FALSE(PSI.isHotCallSite(CS2, &BFI));
+}
+
+TEST_F(ProfileSummaryInfoTest, SampleProf) {
+  auto M = makeLLVMModule("SampleProfile");
+  Function *F = M->getFunction("f");
+  ProfileSummaryInfo PSI = buildPSI(M.get());
+  EXPECT_TRUE(PSI.hasProfileSummary());
+  EXPECT_TRUE(PSI.hasSampleProfile());
+
+  BasicBlock &BB0 = F->getEntryBlock();
+  BasicBlock *BB1 = BB0.getTerminator()->getSuccessor(0);
+  BasicBlock *BB2 = BB0.getTerminator()->getSuccessor(1);
+  BasicBlock *BB3 = BB1->getSingleSuccessor();
+
+  BlockFrequencyInfo BFI = buildBFI(*F);
+  EXPECT_TRUE(PSI.isHotBlock(&BB0, &BFI));
+  EXPECT_TRUE(PSI.isHotBlock(BB1, &BFI));
+  EXPECT_FALSE(PSI.isHotBlock(BB2, &BFI));
+  EXPECT_TRUE(PSI.isHotBlock(BB3, &BFI));
+
+  CallSite CS1(BB1->getFirstNonPHI());
+  auto *CI2 = BB2->getFirstNonPHI();
+  // Manually attach branch weights metadata to the call instruction.
+  SmallVector<uint32_t, 1> Weights;
+  Weights.push_back(1000);
+  MDBuilder MDB(M->getContext());
+  CI2->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
+  CallSite CS2(CI2);
+
+  EXPECT_FALSE(PSI.isHotCallSite(CS1, &BFI));
+  EXPECT_TRUE(PSI.isHotCallSite(CS2, &BFI));
+
+  // Test that CS2 is considered hot when it gets an MD_prof metadata with
+  // weights that exceed the hot count threshold.
+  CI2->setMetadata(llvm::LLVMContext::MD_prof, MDB.createBranchWeights({400}));
+  EXPECT_TRUE(PSI.isHotCallSite(CS2, &BFI));
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/src/llvm-project/llvm/unittests/Analysis/ScalarEvolutionTest.cpp b/src/llvm-project/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
new file mode 100644
index 0000000..3da0614bb
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -0,0 +1,1394 @@
+//===- ScalarEvolutionTest.cpp - ScalarEvolution unit tests ---------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/ScalarEvolutionExpander.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+// We use this fixture to keep the analyses that ScalarEvolution depends on
+// (AssumptionCache, DominatorTree, LoopInfo) alive for the duration of each
+// test.
+class ScalarEvolutionsTest : public testing::Test {
+protected:
+  LLVMContext Context;
+  Module M;
+  TargetLibraryInfoImpl TLII;
+  TargetLibraryInfo TLI;
+
+  std::unique_ptr<AssumptionCache> AC;
+  std::unique_ptr<DominatorTree> DT;
+  std::unique_ptr<LoopInfo> LI;
+
+  ScalarEvolutionsTest() : M("", Context), TLII(), TLI(TLII) {}
+
+  ScalarEvolution buildSE(Function &F) {
+    AC.reset(new AssumptionCache(F));
+    DT.reset(new DominatorTree(F));
+    LI.reset(new LoopInfo(*DT));
+    return ScalarEvolution(F, TLI, *AC, *DT, *LI);
+  }
+
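+  // Looks up FuncName in M, builds ScalarEvolution for it, and invokes Test
+  // with the function, its LoopInfo, and the freshly built SE.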
+  void runWithSE(
+      Module &M, StringRef FuncName,
+      function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
+    auto *F = M.getFunction(FuncName);
+    ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
+    ScalarEvolution SE = buildSE(*F);
+    Test(*F, *LI, SE);
+  }
+};
+
+TEST_F(ScalarEvolutionsTest, SCEVUnknownRAUW) {
+  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
+                                              std::vector<Type *>(), false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+  BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
+  ReturnInst::Create(Context, nullptr, BB);
+
+  Type *Ty = Type::getInt1Ty(Context);
+  Constant *Init = Constant::getNullValue(Ty);
+  Value *V0 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V0");
+  Value *V1 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V1");
+  Value *V2 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V2");
+
+  ScalarEvolution SE = buildSE(*F);
+
+  const SCEV *S0 = SE.getSCEV(V0);
+  const SCEV *S1 = SE.getSCEV(V1);
+  const SCEV *S2 = SE.getSCEV(V2);
+
+  const SCEV *P0 = SE.getAddExpr(S0, S0);
+  const SCEV *P1 = SE.getAddExpr(S1, S1);
+  const SCEV *P2 = SE.getAddExpr(S2, S2);
+
+  const SCEVMulExpr *M0 = cast<SCEVMulExpr>(P0);
+  const SCEVMulExpr *M1 = cast<SCEVMulExpr>(P1);
+  const SCEVMulExpr *M2 = cast<SCEVMulExpr>(P2);
+
+  EXPECT_EQ(cast<SCEVConstant>(M0->getOperand(0))->getValue()->getZExtValue(),
+            2u);
+  EXPECT_EQ(cast<SCEVConstant>(M1->getOperand(0))->getValue()->getZExtValue(),
+            2u);
+  EXPECT_EQ(cast<SCEVConstant>(M2->getOperand(0))->getValue()->getZExtValue(),
+            2u);
+
+  // Before the RAUWs, these are all pointing to separate values.
+  EXPECT_EQ(cast<SCEVUnknown>(M0->getOperand(1))->getValue(), V0);
+  EXPECT_EQ(cast<SCEVUnknown>(M1->getOperand(1))->getValue(), V1);
+  EXPECT_EQ(cast<SCEVUnknown>(M2->getOperand(1))->getValue(), V2);
+
+  // Do some RAUWs.
+  V2->replaceAllUsesWith(V1);
+  V1->replaceAllUsesWith(V0);
+
+  // After the RAUWs, these should all be pointing to V0.
+  EXPECT_EQ(cast<SCEVUnknown>(M0->getOperand(1))->getValue(), V0);
+  EXPECT_EQ(cast<SCEVUnknown>(M1->getOperand(1))->getValue(), V0);
+  EXPECT_EQ(cast<SCEVUnknown>(M2->getOperand(1))->getValue(), V0);
+}
+
+TEST_F(ScalarEvolutionsTest, SimplifiedPHI) {
+  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
+                                              std::vector<Type *>(), false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
+  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
+  BranchInst::Create(LoopBB, EntryBB);
+  BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
+                     LoopBB);
+  ReturnInst::Create(Context, nullptr, ExitBB);
+  auto *Ty = Type::getInt32Ty(Context);
+  auto *PN = PHINode::Create(Ty, 2, "", &*LoopBB->begin());
+  PN->addIncoming(Constant::getNullValue(Ty), EntryBB);
+  PN->addIncoming(UndefValue::get(Ty), LoopBB);
+  ScalarEvolution SE = buildSE(*F);
+  auto *S1 = SE.getSCEV(PN);
+  auto *S2 = SE.getSCEV(PN);
+  auto *ZeroConst = SE.getConstant(Ty, 0);
+
+  // At some point, only the first call to getSCEV returned the simplified
+  // SCEVConstant and later calls just returned a SCEVUnknown referencing the
+  // PHI node.
+  EXPECT_EQ(S1, ZeroConst);
+  EXPECT_EQ(S1, S2);
+}
+
+TEST_F(ScalarEvolutionsTest, ExpandPtrTypeSCEV) {
+  // This tests the fix for PR30213. It exercises the branch in SCEV
+  // expansion taken when the value in a ValueOffsetPair is a pointer and the
+  // offset is not divisible by the element type size of that value.
+  auto *I8Ty = Type::getInt8Ty(Context);
+  auto *I8PtrTy = Type::getInt8PtrTy(Context);
+  auto *I32Ty = Type::getInt32Ty(Context);
+  auto *I32PtrTy = Type::getInt32PtrTy(Context);
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
+  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
+  BranchInst::Create(LoopBB, EntryBB);
+  ReturnInst::Create(Context, nullptr, ExitBB);
+
+  // loop:                            ; preds = %loop, %entry
+  //   %alloca = alloca i32
+  //   %gep0 = getelementptr i32, i32* %alloca, i32 1
+  //   %bitcast1 = bitcast i32* %gep0 to i8*
+  //   %gep1 = getelementptr i8, i8* %bitcast1, i32 1
+  //   %gep2 = getelementptr i8, i8* undef, i32 1
+  //   %cmp = icmp ult i8* undef, %bitcast1
+  //   %select = select i1 %cmp, i8* %gep1, i8* %gep2
+  //   %bitcast2 = bitcast i8* %select to i32*
+  //   br i1 undef, label %loop, label %exit
+
+  const DataLayout &DL = F->getParent()->getDataLayout();
+  BranchInst *Br = BranchInst::Create(
+      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
+  AllocaInst *Alloca = new AllocaInst(I32Ty, DL.getAllocaAddrSpace(),
+                                      "alloca", Br);
+  ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
+  GetElementPtrInst *Gep0 =
+      GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
+  CastInst *CastA =
+      CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
+  GetElementPtrInst *Gep1 =
+      GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
+  GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
+      I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
+  CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
+                                 UndefValue::get(I8PtrTy), CastA, "cmp", Br);
+  SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
+  CastInst *CastB =
+      CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);
+
+  ScalarEvolution SE = buildSE(*F);
+  auto *S = SE.getSCEV(CastB);
+  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+  Value *V =
+      Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);
+
+  // Expect the expansion code contains:
+  //   %0 = bitcast i32* %bitcast2 to i8*
+  //   %uglygep = getelementptr i8, i8* %0, i64 -1
+  //   %1 = bitcast i8* %uglygep to i32*
+  EXPECT_TRUE(isa<BitCastInst>(V));
+  Instruction *Gep = cast<Instruction>(V)->getPrevNode();
+  EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
+  EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
+  EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
+  EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
+}
+
+static Instruction *getInstructionByName(Function &F, StringRef Name) {
+  for (auto &I : instructions(F))
+    if (I.getName() == Name)
+      return &I;
+  llvm_unreachable("Expected to find instruction!");
+}
+
+TEST_F(ScalarEvolutionsTest, CommutativeExprOperandOrder) {
+  LLVMContext C;
+  SMDiagnostic Err;
+  std::unique_ptr<Module> M = parseAssemblyString(
+      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
+      " "
+      "@var_0 = external global i32, align 4"
+      "@var_1 = external global i32, align 4"
+      "@var_2 = external global i32, align 4"
+      " "
+      "declare i32 @unknown(i32, i32, i32)"
+      " "
+      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
+      "    local_unnamed_addr { "
+      "entry: "
+      "  %entrycond = icmp sgt i32 %n, 0 "
+      "  br i1 %entrycond, label %loop.ph, label %for.end "
+      " "
+      "loop.ph: "
+      "  %a = load i32, i32* %A, align 4 "
+      "  %b = load i32, i32* %B, align 4 "
+      "  %mul = mul nsw i32 %b, %a "
+      "  %iv0.init = getelementptr inbounds i8, i8* %arr, i32 %mul "
+      "  br label %loop "
+      " "
+      "loop: "
+      "  %iv0 = phi i8* [ %iv0.inc, %loop ], [ %iv0.init, %loop.ph ] "
+      "  %iv1 = phi i32 [ %iv1.inc, %loop ], [ 0, %loop.ph ] "
+      "  %conv = trunc i32 %iv1 to i8 "
+      "  store i8 %conv, i8* %iv0, align 1 "
+      "  %iv0.inc = getelementptr inbounds i8, i8* %iv0, i32 %b "
+      "  %iv1.inc = add nuw nsw i32 %iv1, 1 "
+      "  %exitcond = icmp eq i32 %iv1.inc, %n "
+      "  br i1 %exitcond, label %for.end.loopexit, label %loop "
+      " "
+      "for.end.loopexit: "
+      "  br label %for.end "
+      " "
+      "for.end: "
+      "  ret void "
+      "} "
+      " "
+      "define void @f_2(i32* %X, i32* %Y, i32* %Z) { "
+      "  %x = load i32, i32* %X "
+      "  %y = load i32, i32* %Y "
+      "  %z = load i32, i32* %Z "
+      "  ret void "
+      "} "
+      " "
+      "define void @f_3() { "
+      "  %x = load i32, i32* @var_0"
+      "  %y = load i32, i32* @var_1"
+      "  %z = load i32, i32* @var_2"
+      "  ret void"
+      "} "
+      " "
+      "define void @f_4(i32 %a, i32 %b, i32 %c) { "
+      "  %x = call i32 @unknown(i32 %a, i32 %b, i32 %c)"
+      "  %y = call i32 @unknown(i32 %b, i32 %c, i32 %a)"
+      "  %z = call i32 @unknown(i32 %c, i32 %a, i32 %b)"
+      "  ret void"
+      "} "
+      ,
+      Err, C);
+
+  assert(M && "Could not parse module?");
+  assert(!verifyModule(*M) && "Must have been well formed!");
+
+  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+    auto *IV0 = getInstructionByName(F, "iv0");
+    auto *IV0Inc = getInstructionByName(F, "iv0.inc");
+
+    auto *FirstExprForIV0 = SE.getSCEV(IV0);
+    auto *FirstExprForIV0Inc = SE.getSCEV(IV0Inc);
+    auto *SecondExprForIV0 = SE.getSCEV(IV0);
+
+    EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0));
+    EXPECT_TRUE(isa<SCEVAddRecExpr>(FirstExprForIV0Inc));
+    EXPECT_TRUE(isa<SCEVAddRecExpr>(SecondExprForIV0));
+  });
+
+  auto CheckCommutativeMulExprs = [&](ScalarEvolution &SE, const SCEV *A,
+                                      const SCEV *B, const SCEV *C) {
+    EXPECT_EQ(SE.getMulExpr(A, B), SE.getMulExpr(B, A));
+    EXPECT_EQ(SE.getMulExpr(B, C), SE.getMulExpr(C, B));
+    EXPECT_EQ(SE.getMulExpr(A, C), SE.getMulExpr(C, A));
+
+    SmallVector<const SCEV *, 3> Ops0 = {A, B, C};
+    SmallVector<const SCEV *, 3> Ops1 = {A, C, B};
+    SmallVector<const SCEV *, 3> Ops2 = {B, A, C};
+    SmallVector<const SCEV *, 3> Ops3 = {B, C, A};
+    SmallVector<const SCEV *, 3> Ops4 = {C, B, A};
+    SmallVector<const SCEV *, 3> Ops5 = {C, A, B};
+
+    auto *Mul0 = SE.getMulExpr(Ops0);
+    auto *Mul1 = SE.getMulExpr(Ops1);
+    auto *Mul2 = SE.getMulExpr(Ops2);
+    auto *Mul3 = SE.getMulExpr(Ops3);
+    auto *Mul4 = SE.getMulExpr(Ops4);
+    auto *Mul5 = SE.getMulExpr(Ops5);
+
+    EXPECT_EQ(Mul0, Mul1) << "Expected " << *Mul0 << " == " << *Mul1;
+    EXPECT_EQ(Mul1, Mul2) << "Expected " << *Mul1 << " == " << *Mul2;
+    EXPECT_EQ(Mul2, Mul3) << "Expected " << *Mul2 << " == " << *Mul3;
+    EXPECT_EQ(Mul3, Mul4) << "Expected " << *Mul3 << " == " << *Mul4;
+    EXPECT_EQ(Mul4, Mul5) << "Expected " << *Mul4 << " == " << *Mul5;
+  };
+
+  for (StringRef FuncName : {"f_2", "f_3", "f_4"})
+    runWithSE(
+        *M, FuncName, [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+          CheckCommutativeMulExprs(SE, SE.getSCEV(getInstructionByName(F, "x")),
+                                   SE.getSCEV(getInstructionByName(F, "y")),
+                                   SE.getSCEV(getInstructionByName(F, "z")));
+        });
+}
+
+TEST_F(ScalarEvolutionsTest, CompareSCEVComplexity) {
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *LoopBB = BasicBlock::Create(Context, "bb1", F);
+  BranchInst::Create(LoopBB, EntryBB);
+
+  auto *Ty = Type::getInt32Ty(Context);
+  SmallVector<Instruction*, 8> Muls(8), Acc(8), NextAcc(8);
+
+  Acc[0] = PHINode::Create(Ty, 2, "", LoopBB);
+  Acc[1] = PHINode::Create(Ty, 2, "", LoopBB);
+  Acc[2] = PHINode::Create(Ty, 2, "", LoopBB);
+  Acc[3] = PHINode::Create(Ty, 2, "", LoopBB);
+  Acc[4] = PHINode::Create(Ty, 2, "", LoopBB);
+  Acc[5] = PHINode::Create(Ty, 2, "", LoopBB);
+  Acc[6] = PHINode::Create(Ty, 2, "", LoopBB);
+  Acc[7] = PHINode::Create(Ty, 2, "", LoopBB);
+
+  for (int i = 0; i < 20; i++) {
+    Muls[0] = BinaryOperator::CreateMul(Acc[0], Acc[0], "", LoopBB);
+    NextAcc[0] = BinaryOperator::CreateAdd(Muls[0], Acc[4], "", LoopBB);
+    Muls[1] = BinaryOperator::CreateMul(Acc[1], Acc[1], "", LoopBB);
+    NextAcc[1] = BinaryOperator::CreateAdd(Muls[1], Acc[5], "", LoopBB);
+    Muls[2] = BinaryOperator::CreateMul(Acc[2], Acc[2], "", LoopBB);
+    NextAcc[2] = BinaryOperator::CreateAdd(Muls[2], Acc[6], "", LoopBB);
+    Muls[3] = BinaryOperator::CreateMul(Acc[3], Acc[3], "", LoopBB);
+    NextAcc[3] = BinaryOperator::CreateAdd(Muls[3], Acc[7], "", LoopBB);
+
+    Muls[4] = BinaryOperator::CreateMul(Acc[4], Acc[4], "", LoopBB);
+    NextAcc[4] = BinaryOperator::CreateAdd(Muls[4], Acc[0], "", LoopBB);
+    Muls[5] = BinaryOperator::CreateMul(Acc[5], Acc[5], "", LoopBB);
+    NextAcc[5] = BinaryOperator::CreateAdd(Muls[5], Acc[1], "", LoopBB);
+    Muls[6] = BinaryOperator::CreateMul(Acc[6], Acc[6], "", LoopBB);
+    NextAcc[6] = BinaryOperator::CreateAdd(Muls[6], Acc[2], "", LoopBB);
+    Muls[7] = BinaryOperator::CreateMul(Acc[7], Acc[7], "", LoopBB);
+    NextAcc[7] = BinaryOperator::CreateAdd(Muls[7], Acc[3], "", LoopBB);
+    Acc = NextAcc;
+  }
+
+  auto II = LoopBB->begin();
+  for (int i = 0; i < 8; i++) {
+    PHINode *Phi = cast<PHINode>(&*II++);
+    Phi->addIncoming(Acc[i], LoopBB);
+    Phi->addIncoming(UndefValue::get(Ty), EntryBB);
+  }
+
+  BasicBlock *ExitBB = BasicBlock::Create(Context, "bb2", F);
+  BranchInst::Create(LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)),
+                     LoopBB);
+
+  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
+  Acc[1] = BinaryOperator::CreateAdd(Acc[2], Acc[3], "", ExitBB);
+  Acc[2] = BinaryOperator::CreateAdd(Acc[4], Acc[5], "", ExitBB);
+  Acc[3] = BinaryOperator::CreateAdd(Acc[6], Acc[7], "", ExitBB);
+  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
+  Acc[1] = BinaryOperator::CreateAdd(Acc[2], Acc[3], "", ExitBB);
+  Acc[0] = BinaryOperator::CreateAdd(Acc[0], Acc[1], "", ExitBB);
+
+  ReturnInst::Create(Context, nullptr, ExitBB);
+
+  ScalarEvolution SE = buildSE(*F);
+
+  EXPECT_NE(nullptr, SE.getSCEV(Acc[0]));
+}
+
+TEST_F(ScalarEvolutionsTest, CompareValueComplexity) {
+  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(Context);
+  PointerType *IntPtrPtrTy = IntPtrTy->getPointerTo();
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), {IntPtrTy, IntPtrTy}, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+
+  Value *X = &*F->arg_begin();
+  Value *Y = &*std::next(F->arg_begin());
+
+  const int ValueDepth = 10;
+  for (int i = 0; i < ValueDepth; i++) {
+    X = new LoadInst(new IntToPtrInst(X, IntPtrPtrTy, "", EntryBB), "",
+                     /*isVolatile*/ false, EntryBB);
+    Y = new LoadInst(new IntToPtrInst(Y, IntPtrPtrTy, "", EntryBB), "",
+                     /*isVolatile*/ false, EntryBB);
+  }
+
+  auto *MulA = BinaryOperator::CreateMul(X, Y, "", EntryBB);
+  auto *MulB = BinaryOperator::CreateMul(Y, X, "", EntryBB);
+  ReturnInst::Create(Context, nullptr, EntryBB);
+
+  // This test isn't checking for correctness.  Today, making A and B resolve
+  // to the same SCEV would require deeper searching in CompareValueComplexity,
+  // which would slow down compilation.  However, this test can fail (with
+  // LLVM's behavior still being correct) if we ever have a smarter
+  // CompareValueComplexity that is both fast and more accurate.
+
+  ScalarEvolution SE = buildSE(*F);
+  auto *A = SE.getSCEV(MulA);
+  auto *B = SE.getSCEV(MulB);
+  EXPECT_NE(A, B);
+}
+
+TEST_F(ScalarEvolutionsTest, SCEVAddExpr) {
+  Type *Ty32 = Type::getInt32Ty(Context);
+  Type *ArgTys[] = {Type::getInt64Ty(Context), Ty32};
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+
+  Argument *A1 = &*F->arg_begin();
+  Argument *A2 = &*(std::next(F->arg_begin()));
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+
+  Instruction *Trunc = CastInst::CreateTruncOrBitCast(A1, Ty32, "", EntryBB);
+  Instruction *Mul1 = BinaryOperator::CreateMul(Trunc, A2, "", EntryBB);
+  Instruction *Add1 = BinaryOperator::CreateAdd(Mul1, Trunc, "", EntryBB);
+  Mul1 = BinaryOperator::CreateMul(Add1, Trunc, "", EntryBB);
+  Instruction *Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
+  // FIXME: The size of this is arbitrary and doesn't seem to change the
+  // result, but SCEV will do quadratic work for these so a large number here
+  // will be extremely slow. We should revisit what and how this is testing
+  // SCEV.
+  for (int i = 0; i < 10; i++) {
+    Mul1 = BinaryOperator::CreateMul(Add2, Add1, "", EntryBB);
+    Add1 = Add2;
+    Add2 = BinaryOperator::CreateAdd(Mul1, Add1, "", EntryBB);
+  }
+
+  ReturnInst::Create(Context, nullptr, EntryBB);
+  ScalarEvolution SE = buildSE(*F);
+  EXPECT_NE(nullptr, SE.getSCEV(Mul1));
+}
+
+static Instruction &GetInstByName(Function &F, StringRef Name) {
+  for (auto &I : instructions(F))
+    if (I.getName() == Name)
+      return I;
+  llvm_unreachable("Could not find instructions!");
+}
+
+TEST_F(ScalarEvolutionsTest, SCEVNormalization) {
+  LLVMContext C;
+  SMDiagnostic Err;
+  std::unique_ptr<Module> M = parseAssemblyString(
+      "target datalayout = \"e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128\" "
+      " "
+      "@var_0 = external global i32, align 4"
+      "@var_1 = external global i32, align 4"
+      "@var_2 = external global i32, align 4"
+      " "
+      "declare i32 @unknown(i32, i32, i32)"
+      " "
+      "define void @f_1(i8* nocapture %arr, i32 %n, i32* %A, i32* %B) "
+      "    local_unnamed_addr { "
+      "entry: "
+      "  br label %loop.ph "
+      " "
+      "loop.ph: "
+      "  br label %loop "
+      " "
+      "loop: "
+      "  %iv0 = phi i32 [ %iv0.inc, %loop ], [ 0, %loop.ph ] "
+      "  %iv1 = phi i32 [ %iv1.inc, %loop ], [ -2147483648, %loop.ph ] "
+      "  %iv0.inc = add i32 %iv0, 1 "
+      "  %iv1.inc = add i32 %iv1, 3 "
+      "  br i1 undef, label %for.end.loopexit, label %loop "
+      " "
+      "for.end.loopexit: "
+      "  ret void "
+      "} "
+      " "
+      "define void @f_2(i32 %a, i32 %b, i32 %c, i32 %d) "
+      "    local_unnamed_addr { "
+      "entry: "
+      "  br label %loop_0 "
+      " "
+      "loop_0: "
+      "  br i1 undef, label %loop_0, label %loop_1 "
+      " "
+      "loop_1: "
+      "  br i1 undef, label %loop_2, label %loop_1 "
+      " "
+      " "
+      "loop_2: "
+      "  br i1 undef, label %end, label %loop_2 "
+      " "
+      "end: "
+      "  ret void "
+      "} "
+      ,
+      Err, C);
+
+  assert(M && "Could not parse module?");
+  assert(!verifyModule(*M) && "Must have been well formed!");
+
+  runWithSE(*M, "f_1", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+    auto &I0 = GetInstByName(F, "iv0");
+    auto &I1 = *I0.getNextNode();
+
+    auto *S0 = cast<SCEVAddRecExpr>(SE.getSCEV(&I0));
+    PostIncLoopSet Loops;
+    Loops.insert(S0->getLoop());
+    auto *N0 = normalizeForPostIncUse(S0, Loops, SE);
+    auto *D0 = denormalizeForPostIncUse(N0, Loops, SE);
+    EXPECT_EQ(S0, D0) << *S0 << " " << *D0;
+
+    auto *S1 = cast<SCEVAddRecExpr>(SE.getSCEV(&I1));
+    Loops.clear();
+    Loops.insert(S1->getLoop());
+    auto *N1 = normalizeForPostIncUse(S1, Loops, SE);
+    auto *D1 = denormalizeForPostIncUse(N1, Loops, SE);
+    EXPECT_EQ(S1, D1) << *S1 << " " << *D1;
+  });
+
+  runWithSE(*M, "f_2", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
+    auto *L2 = *LI.begin();
+    auto *L1 = *std::next(LI.begin());
+    auto *L0 = *std::next(LI.begin(), 2);
+
+    auto GetAddRec = [&SE](const Loop *L, std::initializer_list<const SCEV *> Ops) {
+      SmallVector<const SCEV *, 4> OpsCopy(Ops);
+      return SE.getAddRecExpr(OpsCopy, L, SCEV::FlagAnyWrap);
+    };
+
+    auto GetAdd = [&SE](std::initializer_list<const SCEV *> Ops) {
+      SmallVector<const SCEV *, 4> OpsCopy(Ops);
+      return SE.getAddExpr(OpsCopy, SCEV::FlagAnyWrap);
+    };
+
+    // We first populate the Exprs vector with a few "interesting" SCEV
+    // expressions, and then we go through the list and assert that each
+    // expression in it has an invertible normalization.
+
+    std::vector<const SCEV *> Exprs;
+    {
+      const SCEV *V0 = SE.getSCEV(&*F.arg_begin());
+      const SCEV *V1 = SE.getSCEV(&*std::next(F.arg_begin(), 1));
+      const SCEV *V2 = SE.getSCEV(&*std::next(F.arg_begin(), 2));
+      const SCEV *V3 = SE.getSCEV(&*std::next(F.arg_begin(), 3));
+
+      Exprs.push_back(GetAddRec(L0, {V0}));             // 0
+      Exprs.push_back(GetAddRec(L0, {V0, V1}));         // 1
+      Exprs.push_back(GetAddRec(L0, {V0, V1, V2}));     // 2
+      Exprs.push_back(GetAddRec(L0, {V0, V1, V2, V3})); // 3
+
+      Exprs.push_back(
+          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[3], Exprs[0]})); // 4
+      Exprs.push_back(
+          GetAddRec(L1, {Exprs[1], Exprs[2], Exprs[0], Exprs[3]})); // 5
+      Exprs.push_back(
+          GetAddRec(L1, {Exprs[1], Exprs[3], Exprs[3], Exprs[1]})); // 6
+
+      Exprs.push_back(GetAdd({Exprs[6], Exprs[3], V2})); // 7
+
+      Exprs.push_back(
+          GetAddRec(L2, {Exprs[4], Exprs[3], Exprs[3], Exprs[5]})); // 8
+
+      Exprs.push_back(
+          GetAddRec(L2, {Exprs[4], Exprs[6], Exprs[7], Exprs[3], V0})); // 9
+    }
+
+    std::vector<PostIncLoopSet> LoopSets;
+    for (int i = 0; i < 8; i++) {
+      LoopSets.emplace_back();
+      if (i & 1)
+        LoopSets.back().insert(L0);
+      if (i & 2)
+        LoopSets.back().insert(L1);
+      if (i & 4)
+        LoopSets.back().insert(L2);
+    }
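+    // LoopSets now holds all 2^3 subsets of {L0, L1, L2}, so the round-trip
+    // below is checked against every combination of post-increment loops.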
+
+    for (const auto &LoopSet : LoopSets)
+      for (auto *S : Exprs) {
+        {
+          auto *N = llvm::normalizeForPostIncUse(S, LoopSet, SE);
+          auto *D = llvm::denormalizeForPostIncUse(N, LoopSet, SE);
+
+          // Normalizing and then denormalizing should give us back the same
+          // value.
+          EXPECT_EQ(S, D) << "S = " << *S << "  D = " << *D << " N = " << *N;
+        }
+        {
+          auto *D = llvm::denormalizeForPostIncUse(S, LoopSet, SE);
+          auto *N = llvm::normalizeForPostIncUse(D, LoopSet, SE);
+
+          // Denormalizing and then normalizing should give us back the same
+          // value.
+          EXPECT_EQ(S, N) << "S = " << *S << "  N = " << *N;
+        }
+      }
+  });
+}
+
+// Check that a call to getZeroExtendExpr does not take exponential time.
+TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExpr) {
+  LLVMContext C;
+  SMDiagnostic Err;
+
+  // Generate a function like below:
+  // define void @foo() {
+  // entry:
+  //   br label %for.cond
+  //
+  // for.cond:
+  //   %0 = phi i64 [ 100, %entry ], [ %dec, %for.inc ]
+  //   %cmp = icmp sgt i64 %0, 90
+  //   br i1 %cmp, label %for.inc, label %for.cond1
+  //
+  // for.inc:
+  //   %dec = add nsw i64 %0, -1
+  //   br label %for.cond
+  //
+  // for.cond1:
+  //   %1 = phi i64 [ 100, %for.cond ], [ %dec5, %for.inc2 ]
+  //   %cmp3 = icmp sgt i64 %1, 90
+  //   br i1 %cmp3, label %for.inc2, label %for.cond4
+  //
+  // for.inc2:
+  //   %dec5 = add nsw i64 %1, -1
+  //   br label %for.cond1
+  //
+  // ......
+  //
+  // for.cond89:
+  //   %19 = phi i64 [ 100, %for.cond84 ], [ %dec94, %for.inc92 ]
+  //   %cmp93 = icmp sgt i64 %19, 90
+  //   br i1 %cmp93, label %for.inc92, label %for.end
+  //
+  // for.inc92:
+  //   %dec94 = add nsw i64 %19, -1
+  //   br label %for.cond89
+  //
+  // for.end:
+  //   %gep = getelementptr i8, i8* null, i64 %dec
+  //   %gep6 = getelementptr i8, i8* %gep, i64 %dec5
+  //   ......
+  //   %gep95 = getelementptr i8, i8* %gep91, i64 %dec94
+  //   ret void
+  // }
+  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), {}, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("foo", FTy));
+
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *CondBB = BasicBlock::Create(Context, "for.cond", F);
+  BasicBlock *EndBB = BasicBlock::Create(Context, "for.end", F);
+  BranchInst::Create(CondBB, EntryBB);
+  BasicBlock *PrevBB = EntryBB;
+
+  Type *I64Ty = Type::getInt64Ty(Context);
+  Type *I8Ty = Type::getInt8Ty(Context);
+  Type *I8PtrTy = Type::getInt8PtrTy(Context);
+  Value *Accum = Constant::getNullValue(I8PtrTy);
+  int Iters = 20;
+  for (int i = 0; i < Iters; i++) {
+    BasicBlock *IncBB = BasicBlock::Create(Context, "for.inc", F, EndBB);
+    auto *PN = PHINode::Create(I64Ty, 2, "", CondBB);
+    PN->addIncoming(ConstantInt::get(Context, APInt(64, 100)), PrevBB);
+    auto *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SGT, PN,
+                                ConstantInt::get(Context, APInt(64, 90)), "cmp",
+                                CondBB);
+    BasicBlock *NextBB;
+    if (i != Iters - 1)
+      NextBB = BasicBlock::Create(Context, "for.cond", F, EndBB);
+    else
+      NextBB = EndBB;
+    BranchInst::Create(IncBB, NextBB, Cmp, CondBB);
+    auto *Dec = BinaryOperator::CreateNSWAdd(
+        PN, ConstantInt::get(Context, APInt(64, -1)), "dec", IncBB);
+    PN->addIncoming(Dec, IncBB);
+    BranchInst::Create(CondBB, IncBB);
+
+    Accum = GetElementPtrInst::Create(I8Ty, Accum, PN, "gep", EndBB);
+
+    PrevBB = CondBB;
+    CondBB = NextBB;
+  }
+  ReturnInst::Create(Context, nullptr, EndBB);
+  ScalarEvolution SE = buildSE(*F);
+  const SCEV *S = SE.getSCEV(Accum);
+  Type *I128Ty = Type::getInt128Ty(Context);
+  SE.getZeroExtendExpr(S, I128Ty);
+}
+
+// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions
+// when expanding expressions that involve non-integral pointers.
+TEST_F(ScalarEvolutionsTest, SCEVZeroExtendExprNonIntegral) {
+  /*
+   * Create the following code:
+   * func(i64 addrspace(10)* %arg)
+   * top:
+   *  br label %L.ph
+   * L.ph:
+   *  br label %L
+   * L:
+   *  %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
+   *  %add = add i64 %phi, 1
+   *  br i1 undef, label %L, label %post
+   * post:
+   *  %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
+   *  #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
+   *  ret void
+   *
+   * We will create the appropriate SCEV expression for %gep and expand it,
+   * then check that no inttoptr/ptrtoint instructions got inserted.
+   */
+
+  // Create a module with non-integral pointers in its datalayout.
+  Module NIM("nonintegral", Context);
+  std::string DataLayout = M.getDataLayoutStr();
+  if (!DataLayout.empty())
+    DataLayout += "-";
+  DataLayout += "ni:10";
+  NIM.setDataLayout(DataLayout);
+
+  Type *T_int1 = Type::getInt1Ty(Context);
+  Type *T_int64 = Type::getInt64Ty(Context);
+  Type *T_pint64 = T_int64->getPointerTo(10);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
+  Function *F = cast<Function>(NIM.getOrInsertFunction("foo", FTy));
+
+  Argument *Arg = &*F->arg_begin();
+
+  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
+  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
+  BasicBlock *L = BasicBlock::Create(Context, "L", F);
+  BasicBlock *Post = BasicBlock::Create(Context, "post", F);
+
+  IRBuilder<> Builder(Top);
+  Builder.CreateBr(LPh);
+
+  Builder.SetInsertPoint(LPh);
+  Builder.CreateBr(L);
+
+  Builder.SetInsertPoint(L);
+  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
+  Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
+  Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
+  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
+  Phi->addIncoming(Add, L);
+
+  Builder.SetInsertPoint(Post);
+  Value *GepBase = Builder.CreateGEP(Arg, ConstantInt::get(T_int64, 1));
+  Instruction *Ret = Builder.CreateRetVoid();
+
+  ScalarEvolution SE = buildSE(*F);
+  auto *AddRec =
+      SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
+                       LI->getLoopFor(L), SCEV::FlagNUW);
+
+  SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
+  Exp.disableCanonicalMode();
+  Exp.expandCodeFor(AddRec, T_pint64, Ret);
+
+  // Make sure none of the instructions inserted were inttoptr/ptrtoint.
+  // The verifier will check this.
+  EXPECT_FALSE(verifyFunction(*F, &errs()));
+}
+
+// Make sure that SCEV invalidates exit limits after invalidating the values
+// they depend on when we forget a loop.
+TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetLoop) {
+  /*
+   * Create the following code:
+   * func(i64 addrspace(10)* %arg)
+   * top:
+   *  br label %L.ph
+   * L.ph:
+   *  br label %L
+   * L:
+   *  %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
+   *  %add = add i64 %phi, 1
+   *  %cond = icmp slt i64 %add, 1000 ; the limit later becomes 2000.
+   *  br i1 %cond, label %L, label %post
+   * post:
+   *  ret void
+   *
+   */
+
+  // Create a module with non-integral pointers in its datalayout.
+  Module NIM("nonintegral", Context);
+  std::string DataLayout = M.getDataLayoutStr();
+  if (!DataLayout.empty())
+    DataLayout += "-";
+  DataLayout += "ni:10";
+  NIM.setDataLayout(DataLayout);
+
+  Type *T_int64 = Type::getInt64Ty(Context);
+  Type *T_pint64 = T_int64->getPointerTo(10);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
+  Function *F = cast<Function>(NIM.getOrInsertFunction("foo", FTy));
+
+  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
+  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
+  BasicBlock *L = BasicBlock::Create(Context, "L", F);
+  BasicBlock *Post = BasicBlock::Create(Context, "post", F);
+
+  IRBuilder<> Builder(Top);
+  Builder.CreateBr(LPh);
+
+  Builder.SetInsertPoint(LPh);
+  Builder.CreateBr(L);
+
+  Builder.SetInsertPoint(L);
+  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
+  auto *Add = cast<Instruction>(
+      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
+  auto *Limit = ConstantInt::get(T_int64, 1000);
+  auto *Cond = cast<Instruction>(
+      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
+  auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
+  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
+  Phi->addIncoming(Add, L);
+
+  Builder.SetInsertPoint(Post);
+  Builder.CreateRetVoid();
+
+  ScalarEvolution SE = buildSE(*F);
+  auto *Loop = LI->getLoopFor(L);
+  const SCEV *EC = SE.getBackedgeTakenCount(Loop);
+  EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
+  EXPECT_TRUE(isa<SCEVConstant>(EC));
+  EXPECT_EQ(cast<SCEVConstant>(EC)->getAPInt().getLimitedValue(), 999u);
+
+  // The add recurrence {5,+,1} does not correspond to any PHI in the IR, and
+  // that is relevant to this test.
+  auto *Five = SE.getConstant(APInt(/*numBits=*/64, 5));
+  auto *AR =
+      SE.getAddRecExpr(Five, SE.getOne(T_int64), Loop, SCEV::FlagAnyWrap);
+  const SCEV *ARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
+  EXPECT_FALSE(isa<SCEVCouldNotCompute>(ARAtLoopExit));
+  EXPECT_TRUE(isa<SCEVConstant>(ARAtLoopExit));
+  EXPECT_EQ(cast<SCEVConstant>(ARAtLoopExit)->getAPInt().getLimitedValue(),
+            1004u);
+
+  SE.forgetLoop(Loop);
+  Br->eraseFromParent();
+  Cond->eraseFromParent();
+
+  Builder.SetInsertPoint(L);
+  auto *NewCond = Builder.CreateICmp(
+      ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
+  Builder.CreateCondBr(NewCond, L, Post);
+  const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
+  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
+  EXPECT_TRUE(isa<SCEVConstant>(NewEC));
+  EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
+  const SCEV *NewARAtLoopExit = SE.getSCEVAtScope(AR, nullptr);
+  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewARAtLoopExit));
+  EXPECT_TRUE(isa<SCEVConstant>(NewARAtLoopExit));
+  EXPECT_EQ(cast<SCEVConstant>(NewARAtLoopExit)->getAPInt().getLimitedValue(),
+            2004u);
+}
+
+// Make sure that SCEV invalidates exit limits after invalidating the values
+// they depend on when we forget a value.
+TEST_F(ScalarEvolutionsTest, SCEVExitLimitForgetValue) {
+  /*
+   * Create the following code:
+   * func(i64 addrspace(10)* %arg)
+   * top:
+   *  br label %L.ph
+   * L.ph:
+   *  %load = load i64 addrspace(10)* %arg
+   *  br label %L
+   * L:
+   *  %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
+   *  %add = add i64 %phi, 1
+   *  %cond = icmp slt i64 %add, %load ; the load is later replaced with 2000.
+   *  br i1 %cond, label %L, label %post
+   * post:
+   *  ret void
+   *
+   */
+
+  // Create a module with non-integral pointers in its datalayout.
+  Module NIM("nonintegral", Context);
+  std::string DataLayout = M.getDataLayoutStr();
+  if (!DataLayout.empty())
+    DataLayout += "-";
+  DataLayout += "ni:10";
+  NIM.setDataLayout(DataLayout);
+
+  Type *T_int64 = Type::getInt64Ty(Context);
+  Type *T_pint64 = T_int64->getPointerTo(10);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
+  Function *F = cast<Function>(NIM.getOrInsertFunction("foo", FTy));
+
+  Argument *Arg = &*F->arg_begin();
+
+  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
+  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
+  BasicBlock *L = BasicBlock::Create(Context, "L", F);
+  BasicBlock *Post = BasicBlock::Create(Context, "post", F);
+
+  IRBuilder<> Builder(Top);
+  Builder.CreateBr(LPh);
+
+  Builder.SetInsertPoint(LPh);
+  auto *Load = cast<Instruction>(Builder.CreateLoad(T_int64, Arg, "load"));
+  Builder.CreateBr(L);
+
+  Builder.SetInsertPoint(L);
+  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
+  auto *Add = cast<Instruction>(
+      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
+  auto *Cond = cast<Instruction>(
+      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Load, "cond"));
+  auto *Br = cast<Instruction>(Builder.CreateCondBr(Cond, L, Post));
+  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
+  Phi->addIncoming(Add, L);
+
+  Builder.SetInsertPoint(Post);
+  Builder.CreateRetVoid();
+
+  ScalarEvolution SE = buildSE(*F);
+  auto *Loop = LI->getLoopFor(L);
+  const SCEV *EC = SE.getBackedgeTakenCount(Loop);
+  EXPECT_FALSE(isa<SCEVCouldNotCompute>(EC));
+  EXPECT_FALSE(isa<SCEVConstant>(EC));
+
+  SE.forgetValue(Load);
+  Br->eraseFromParent();
+  Cond->eraseFromParent();
+  Load->eraseFromParent();
+
+  Builder.SetInsertPoint(L);
+  auto *NewCond = Builder.CreateICmp(
+      ICmpInst::ICMP_SLT, Add, ConstantInt::get(T_int64, 2000), "new.cond");
+  Builder.CreateCondBr(NewCond, L, Post);
+  const SCEV *NewEC = SE.getBackedgeTakenCount(Loop);
+  EXPECT_FALSE(isa<SCEVCouldNotCompute>(NewEC));
+  EXPECT_TRUE(isa<SCEVConstant>(NewEC));
+  EXPECT_EQ(cast<SCEVConstant>(NewEC)->getAPInt().getLimitedValue(), 1999u);
+}
+
+TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstants) {
+  // Reference: https://reviews.llvm.org/D37265
+  // Make sure that SCEV does not blow up when constructing an AddRec
+  // with predicates for a phi with the update pattern:
+  //  (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
+  // when either the initial value of the Phi or the InvariantAccum is a
+  // constant that is too large to fit in an ix but is zero when truncated
+  // to ix.
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
+  Function *F = cast<Function>(M.getOrInsertFunction("addrecphitest", FTy));
+
+  /*
+    Create IR:
+    entry:
+     br label %loop
+    loop:
+     %0 = phi i64 [-9223372036854775808, %entry], [%3, %loop]
+     %1 = shl i64 %0, 32
+     %2 = ashr exact i64 %1, 32
+     %3 = add i64 %2, -9223372036854775808
+     br i1 undef, label %exit, label %loop
+    exit:
+     ret void
+   */
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
+  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
+
+  // entry:
+  BranchInst::Create(LoopBB, EntryBB);
+  // loop:
+  auto *MinInt64 =
+      ConstantInt::get(Context, APInt(64, 0x8000000000000000U, true));
+  auto *Int64_32 = ConstantInt::get(Context, APInt(64, 32));
+  auto *Br = BranchInst::Create(
+      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
+  auto *Phi = PHINode::Create(Type::getInt64Ty(Context), 2, "", Br);
+  auto *Shl = BinaryOperator::CreateShl(Phi, Int64_32, "", Br);
+  auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int64_32, "", Br);
+  auto *Add = BinaryOperator::CreateAdd(AShr, MinInt64, "", Br);
+  Phi->addIncoming(MinInt64, EntryBB);
+  Phi->addIncoming(Add, LoopBB);
+  // exit:
+  ReturnInst::Create(Context, nullptr, ExitBB);
+
+  // Make sure that SCEV doesn't blow up
+  ScalarEvolution SE = buildSE(*F);
+  SCEVUnionPredicate Preds;
+  const SCEV *Expr = SE.getSCEV(Phi);
+  EXPECT_NE(nullptr, Expr);
+  EXPECT_TRUE(isa<SCEVUnknown>(Expr));
+  auto Result = SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
+}
+
+TEST_F(ScalarEvolutionsTest, SCEVAddRecFromPHIwithLargeConstantAccum) {
+  // Make sure that SCEV does not blow up when constructing an AddRec
+  // with predicates for a phi with the update pattern:
+  //  (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
+  // when the InvariantAccum is a constant that is too large to fit in an
+  // ix but is zero when truncated to ix, and the initial value of the
+  // phi is not a constant.
+  Type *Int32Ty = Type::getInt32Ty(Context);
+  SmallVector<Type *, 1> Types;
+  Types.push_back(Int32Ty);
+  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("addrecphitest", FTy));
+
+  /*
+    Create IR:
+    define @addrecphitest(i32)
+    entry:
+     br label %loop
+    loop:
+     %1 = phi i32 [%0, %entry], [%4, %loop]
+     %2 = shl i32 %1, 16
+     %3 = ashr exact i32 %2, 16
+     %4 = add i32 %3, -2147483648
+     br i1 undef, label %exit, label %loop
+    exit:
+     ret void
+   */
+  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
+  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
+
+  // entry:
+  BranchInst::Create(LoopBB, EntryBB);
+  // loop:
+  auto *MinInt32 = ConstantInt::get(Context, APInt(32, 0x80000000U, true));
+  auto *Int32_16 = ConstantInt::get(Context, APInt(32, 16));
+  auto *Br = BranchInst::Create(
+      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
+  auto *Phi = PHINode::Create(Int32Ty, 2, "", Br);
+  auto *Shl = BinaryOperator::CreateShl(Phi, Int32_16, "", Br);
+  auto *AShr = BinaryOperator::CreateExactAShr(Shl, Int32_16, "", Br);
+  auto *Add = BinaryOperator::CreateAdd(AShr, MinInt32, "", Br);
+  auto *Arg = &*(F->arg_begin());
+  Phi->addIncoming(Arg, EntryBB);
+  Phi->addIncoming(Add, LoopBB);
+  // exit:
+  ReturnInst::Create(Context, nullptr, ExitBB);
+
+  // Make sure that SCEV doesn't blow up
+  ScalarEvolution SE = buildSE(*F);
+  SCEVUnionPredicate Preds;
+  const SCEV *Expr = SE.getSCEV(Phi);
+  EXPECT_NE(nullptr, Expr);
+  EXPECT_TRUE(isa<SCEVUnknown>(Expr));
+  auto Result = SE.createAddRecFromPHIWithCasts(cast<SCEVUnknown>(Expr));
+}
+
+TEST_F(ScalarEvolutionsTest, SCEVFoldSumOfTruncs) {
+  // Verify that the following SCEV folds to zero:
+  //  (-1 * (trunc i64 (-1 * %0) to i32)) + (-1 * (trunc i64 %0 to i32))
+  Type *ArgTy = Type::getInt64Ty(Context);
+  Type *Int32Ty = Type::getInt32Ty(Context);
+  SmallVector<Type *, 1> Types;
+  Types.push_back(ArgTy);
+  FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), Types, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("f", FTy));
+  BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
+  ReturnInst::Create(Context, nullptr, BB);
+
+  ScalarEvolution SE = buildSE(*F);
+
+  auto *Arg = &*(F->arg_begin());
+  const auto *ArgSCEV = SE.getSCEV(Arg);
+
+  // Build the SCEV
+  const auto *A0 = SE.getNegativeSCEV(ArgSCEV);
+  const auto *A1 = SE.getTruncateExpr(A0, Int32Ty);
+  const auto *A = SE.getNegativeSCEV(A1);
+
+  const auto *B0 = SE.getTruncateExpr(ArgSCEV, Int32Ty);
+  const auto *B = SE.getNegativeSCEV(B0);
+
+  const auto *Expr = SE.getAddExpr(A, B);
+  // Verify that the SCEV was folded to 0
+  const auto *ZeroConst = SE.getConstant(Int32Ty, 0);
+  EXPECT_EQ(Expr, ZeroConst);
+}
+
+// Check that we correctly identify the points at which the AddRec SCEV can
+// safely be expanded.
+TEST_F(ScalarEvolutionsTest, SCEVExpanderIsSafeToExpandAt) {
+  /*
+   * Create the following code:
+   * func(i64 addrspace(10)* %arg)
+   * top:
+   *  br label %L.ph
+   * L.ph:
+   *  br label %L
+   * L:
+   *  %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
+   *  %add = add i64 %phi, 1
+   *  %cond = icmp slt i64 %add, 1000
+   *  br i1 %cond, label %L, label %post
+   * post:
+   *  ret void
+   *
+   */
+
+  // Create a module with non-integral pointers in its datalayout.
+  Module NIM("nonintegral", Context);
+  std::string DataLayout = M.getDataLayoutStr();
+  if (!DataLayout.empty())
+    DataLayout += "-";
+  DataLayout += "ni:10";
+  NIM.setDataLayout(DataLayout);
+
+  Type *T_int64 = Type::getInt64Ty(Context);
+  Type *T_pint64 = T_int64->getPointerTo(10);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
+  Function *F = cast<Function>(NIM.getOrInsertFunction("foo", FTy));
+
+  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
+  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
+  BasicBlock *L = BasicBlock::Create(Context, "L", F);
+  BasicBlock *Post = BasicBlock::Create(Context, "post", F);
+
+  IRBuilder<> Builder(Top);
+  Builder.CreateBr(LPh);
+
+  Builder.SetInsertPoint(LPh);
+  Builder.CreateBr(L);
+
+  Builder.SetInsertPoint(L);
+  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
+  auto *Add = cast<Instruction>(
+      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
+  auto *Limit = ConstantInt::get(T_int64, 1000);
+  auto *Cond = cast<Instruction>(
+      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
+  Builder.CreateCondBr(Cond, L, Post);
+  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
+  Phi->addIncoming(Add, L);
+
+  Builder.SetInsertPoint(Post);
+  Builder.CreateRetVoid();
+
+  ScalarEvolution SE = buildSE(*F);
+  const SCEV *S = SE.getSCEV(Phi);
+  EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
+  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
+  EXPECT_TRUE(AR->isAffine());
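+  // The AddRec is defined in loop %L, so expanding it is expected to be safe
+  // inside the loop and after it, but not in %top or the preheader.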
+  EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
+  EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
+  EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
+  EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));
+}
+
+// Check that the SCEV expander does not reuse an existing add that carries
+// the nuw flag when expanding an expression that does not imply nuw.
+TEST_F(ScalarEvolutionsTest, SCEVExpanderNUW) {
+  /*
+   * Create the following code:
+   * func(i64 %a)
+   * entry:
+   *   br false, label %exit, label %body
+   * body:
+   *  %s1 = add i64 %a, -1
+   *  br label %exit
+   * exit:
+   *  %s = add nuw i64 %a, -1
+   *  ret void
+   */
+
+  // Create a module.
+  Module M("SCEVExpanderNUW", Context);
+
+  Type *T_int64 = Type::getInt64Ty(Context);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("func", FTy));
+  Argument *Arg = &*F->arg_begin();
+  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
+
+  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
+  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+
+  IRBuilder<> Builder(Entry);
+  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
+  Builder.CreateCondBr(Cond, Exit, Body);
+
+  Builder.SetInsertPoint(Body);
+  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  Builder.CreateBr(Exit);
+
+  Builder.SetInsertPoint(Exit);
+  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  S2->setHasNoUnsignedWrap(true);
+  auto *R = cast<Instruction>(Builder.CreateRetVoid());
+
+  ScalarEvolution SE = buildSE(*F);
+  const SCEV *S = SE.getSCEV(S1);
+  EXPECT_TRUE(isa<SCEVAddExpr>(S));
+  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
+  EXPECT_FALSE(I->hasNoUnsignedWrap());
+}
+
+// Check that the SCEV expander does not reuse an existing add that carries
+// the nsw flag when expanding an expression that does not imply nsw.
+TEST_F(ScalarEvolutionsTest, SCEVExpanderNSW) {
+  /*
+   * Create the following code:
+   * func(i64 %a)
+   * entry:
+   *   br false, label %exit, label %body
+   * body:
+   *  %s1 = add i64 %a, -1
+   *  br label %exit
+   * exit:
+   *  %s = add nsw i64 %a, -1
+   *  ret void
+   */
+
+  // Create a module.
+  Module M("SCEVExpanderNSW", Context);
+
+  Type *T_int64 = Type::getInt64Ty(Context);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("func", FTy));
+  Argument *Arg = &*F->arg_begin();
+  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
+
+  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
+  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+
+  IRBuilder<> Builder(Entry);
+  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
+  Builder.CreateCondBr(Cond, Exit, Body);
+
+  Builder.SetInsertPoint(Body);
+  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  Builder.CreateBr(Exit);
+
+  Builder.SetInsertPoint(Exit);
+  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  S2->setHasNoSignedWrap(true);
+  auto *R = cast<Instruction>(Builder.CreateRetVoid());
+
+  ScalarEvolution SE = buildSE(*F);
+  const SCEV *S = SE.getSCEV(S1);
+  EXPECT_TRUE(isa<SCEVAddExpr>(S));
+  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
+  EXPECT_FALSE(I->hasNoSignedWrap());
+}
+
+// Check that SCEV does not reuse a cached SCEV -> Value mapping when the
+// cached Value differs from the SCEV in the nuw flag.
+TEST_F(ScalarEvolutionsTest, SCEVCacheNUW) {
+  /*
+   * Create the following code:
+   * func(i64 %a)
+   * entry:
+   *  %s1 = add i64 %a, -1
+   *  %s2 = add nuw i64 %a, -1
+   *  br label %exit
+   * exit:
+   *  ret void
+   */
+
+  // Create a module.
+  Module M("SCEVCacheNUW", Context);
+
+  Type *T_int64 = Type::getInt64Ty(Context);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("func", FTy));
+  Argument *Arg = &*F->arg_begin();
+  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
+
+  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+
+  IRBuilder<> Builder(Entry);
+  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  S2->setHasNoUnsignedWrap(true);
+  Builder.CreateBr(Exit);
+
+  Builder.SetInsertPoint(Exit);
+  auto *R = cast<Instruction>(Builder.CreateRetVoid());
+
+  ScalarEvolution SE = buildSE(*F);
+  // Get S2 first to move it to cache.
+  const SCEV *SC2 = SE.getSCEV(S2);
+  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
+  // Now get S1.
+  const SCEV *SC1 = SE.getSCEV(S1);
+  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
+  // Expand SC1; the expansion should reuse S1, not S2, even though S2 was
+  // cached first.
+  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
+  EXPECT_FALSE(I->hasNoUnsignedWrap());
+}
+
+// Check that SCEV does not reuse a cached SCEV -> Value mapping when the
+// cached Value differs from the SCEV in the nsw flag.
+TEST_F(ScalarEvolutionsTest, SCEVCacheNSW) {
+  /*
+   * Create the following code:
+   * func(i64 %a)
+   * entry:
+   *  %s1 = add i64 %a, -1
+   *  %s2 = add nsw i64 %a, -1
+   *  br label %exit
+   * exit:
+   *  ret void
+   */
+
+  // Create a module.
+  Module M("SCEVCacheNUW", Context);
+
+  Type *T_int64 = Type::getInt64Ty(Context);
+
+  FunctionType *FTy =
+      FunctionType::get(Type::getVoidTy(Context), { T_int64 }, false);
+  Function *F = cast<Function>(M.getOrInsertFunction("func", FTy));
+  Argument *Arg = &*F->arg_begin();
+  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));
+
+  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+
+  IRBuilder<> Builder(Entry);
+  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
+  S2->setHasNoSignedWrap(true);
+  Builder.CreateBr(Exit);
+
+  Builder.SetInsertPoint(Exit);
+  auto *R = cast<Instruction>(Builder.CreateRetVoid());
+
+  ScalarEvolution SE = buildSE(*F);
+  // Get S2 first to move it to cache.
+  const SCEV *SC2 = SE.getSCEV(S2);
+  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
+  // Now get S1.
+  const SCEV *SC1 = SE.getSCEV(S1);
+  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
+  // Expand SC1; the expansion should reuse S1, not S2, even though S2 was
+  // cached first.
+  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
+  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
+  EXPECT_FALSE(I->hasNoSignedWrap());
+}
+
+}  // end anonymous namespace
+}  // end namespace llvm
diff --git a/src/llvm-project/llvm/unittests/Analysis/SparsePropagation.cpp b/src/llvm-project/llvm/unittests/Analysis/SparsePropagation.cpp
new file mode 100644
index 0000000..298b140
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/SparsePropagation.cpp
@@ -0,0 +1,544 @@
+//===- SparsePropagation.cpp - Unit tests for the generic solver ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/SparsePropagation.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/IRBuilder.h"
+#include "gtest/gtest.h"
+using namespace llvm;
+
+namespace {
+/// To enable interprocedural analysis, we assign LLVM values to the following
+/// groups. The register group represents SSA registers, the return group
+/// represents the return values of functions, and the memory group represents
+/// in-memory values. An LLVM Value can technically be in more than one group.
+/// It's necessary to distinguish these groups so we can, for example, track a
+/// global variable separately from the value stored at its location.
+enum class IPOGrouping { Register, Return, Memory };
+
+/// Our LatticeKeys are PointerIntPairs composed of LLVM values and groupings.
+/// The PointerIntPair header provides a DenseMapInfo specialization, so using
+/// these as LatticeKeys is fine.
+using TestLatticeKey = PointerIntPair<Value *, 2, IPOGrouping>;
+} // namespace
+
+namespace llvm {
+/// A specialization of LatticeKeyInfo for TestLatticeKeys. The generic solver
+/// must translate between LatticeKeys and LLVM Values when adding Values to
+/// its work list and inspecting the state of control-flow related values.
+template <> struct LatticeKeyInfo<TestLatticeKey> {
+  static inline Value *getValueFromLatticeKey(TestLatticeKey Key) {
+    return Key.getPointer();
+  }
+  static inline TestLatticeKey getLatticeKeyFromValue(Value *V) {
+    return TestLatticeKey(V, IPOGrouping::Register);
+  }
+};
+} // namespace llvm
+
+namespace {
+/// This class defines a simple test lattice value that could be used for
+/// solving problems similar to constant propagation. The value is maintained
+/// as a PointerIntPair.
+class TestLatticeVal {
+public:
+  /// The states of the lattice value. Only the ConstantVal state is
+  /// interesting; the rest are special states used by the generic solver. The
+  /// UntrackedVal state differs from the other three in that the generic
+  /// solver uses it to avoid doing unnecessary work. In particular, when a
+  /// value moves to the UntrackedVal state, its users are not notified.
+  enum TestLatticeStateTy {
+    UndefinedVal,
+    ConstantVal,
+    OverdefinedVal,
+    UntrackedVal
+  };
+
+  TestLatticeVal() : LatticeVal(nullptr, UndefinedVal) {}
+  TestLatticeVal(Constant *C, TestLatticeStateTy State)
+      : LatticeVal(C, State) {}
+
+  /// Return true if this lattice value is in the Constant state. This is used
+  /// for checking the solver results.
+  bool isConstant() const { return LatticeVal.getInt() == ConstantVal; }
+
+  /// Return true if this lattice value is in the Overdefined state. This is
+  /// used for checking the solver results.
+  bool isOverdefined() const { return LatticeVal.getInt() == OverdefinedVal; }
+
+  bool operator==(const TestLatticeVal &RHS) const {
+    return LatticeVal == RHS.LatticeVal;
+  }
+
+  bool operator!=(const TestLatticeVal &RHS) const {
+    return LatticeVal != RHS.LatticeVal;
+  }
+
+private:
+  /// A simple lattice value type for problems similar to constant propagation.
+  /// It holds the constant value and the lattice state.
+  PointerIntPair<const Constant *, 2, TestLatticeStateTy> LatticeVal;
+};
+
+/// This class defines a simple test lattice function that could be used for
+/// solving problems similar to constant propagation. The test lattice differs
+/// from a "real" lattice in a few ways. First, it initializes all return
+/// values, values stored in global variables, and arguments in the undefined
+/// state. This means that there are no limitations on what we can track
+/// interprocedurally. For simplicity, all global values in the tests will be
+/// given internal linkage, since this is not something this lattice function
+/// tracks. Second, it only handles the few instructions necessary for the
+/// tests.
+class TestLatticeFunc
+    : public AbstractLatticeFunction<TestLatticeKey, TestLatticeVal> {
+public:
+  /// Construct a new test lattice function with special values for the
+  /// Undefined, Overdefined, and Untracked states.
+  TestLatticeFunc()
+      : AbstractLatticeFunction(
+            TestLatticeVal(nullptr, TestLatticeVal::UndefinedVal),
+            TestLatticeVal(nullptr, TestLatticeVal::OverdefinedVal),
+            TestLatticeVal(nullptr, TestLatticeVal::UntrackedVal)) {}
+
+  /// Compute and return a TestLatticeVal for the given TestLatticeKey. For the
+  /// test analysis, a LatticeKey will begin in the undefined state, unless it
+  /// represents an LLVM Constant in the register grouping.
+  TestLatticeVal ComputeLatticeVal(TestLatticeKey Key) override {
+    if (Key.getInt() == IPOGrouping::Register)
+      if (auto *C = dyn_cast<Constant>(Key.getPointer()))
+        return TestLatticeVal(C, TestLatticeVal::ConstantVal);
+    return getUndefVal();
+  }
+
+  /// Merge the two given lattice values. This merge should be equivalent to
+  /// what is done for constant propagation. That is, the resulting lattice
+  /// value is constant only if the two given lattice values are constant and
+  /// hold the same value.
+  TestLatticeVal MergeValues(TestLatticeVal X, TestLatticeVal Y) override {
+    if (X == getUntrackedVal() || Y == getUntrackedVal())
+      return getUntrackedVal();
+    if (X == getOverdefinedVal() || Y == getOverdefinedVal())
+      return getOverdefinedVal();
+    if (X == getUndefVal() && Y == getUndefVal())
+      return getUndefVal();
+    if (X == getUndefVal())
+      return Y;
+    if (Y == getUndefVal())
+      return X;
+    if (X == Y)
+      return X;
+    return getOverdefinedVal();
+  }
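+
+  // Explanatory summary (not part of the original merge logic): for this
+  // constant-propagation-style lattice the merge above behaves as
+  //   merge(Undefined,   X)           == X
+  //   merge(Constant(C), Constant(C)) == Constant(C)
+  //   merge(Constant(C), Constant(D)) == Overdefined   (C != D)
+  //   merge(Overdefined, Constant(C)) == Overdefined
+  //   merge(Untracked,   anything)    == Untracked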
+
+  /// Compute the lattice values that change as a result of executing the given
+  /// instruction. We only handle the few instructions needed for the tests.
+  void ComputeInstructionState(
+      Instruction &I, DenseMap<TestLatticeKey, TestLatticeVal> &ChangedValues,
+      SparseSolver<TestLatticeKey, TestLatticeVal> &SS) override {
+    switch (I.getOpcode()) {
+    case Instruction::Call:
+      return visitCallSite(cast<CallInst>(&I), ChangedValues, SS);
+    case Instruction::Ret:
+      return visitReturn(*cast<ReturnInst>(&I), ChangedValues, SS);
+    case Instruction::Store:
+      return visitStore(*cast<StoreInst>(&I), ChangedValues, SS);
+    default:
+      return visitInst(I, ChangedValues, SS);
+    }
+  }
+
+private:
+  /// Handle call sites. The state of a called function's argument is the merge
+  /// of the current formal argument state with the call site's corresponding
+  /// actual argument state. The call site state is the merge of the call site
+  /// state with the returned value state of the called function.
+  void visitCallSite(CallSite CS,
+                     DenseMap<TestLatticeKey, TestLatticeVal> &ChangedValues,
+                     SparseSolver<TestLatticeKey, TestLatticeVal> &SS) {
+    Function *F = CS.getCalledFunction();
+    Instruction *I = CS.getInstruction();
+    auto RegI = TestLatticeKey(I, IPOGrouping::Register);
+    if (!F) {
+      ChangedValues[RegI] = getOverdefinedVal();
+      return;
+    }
+    SS.MarkBlockExecutable(&F->front());
+    for (Argument &A : F->args()) {
+      auto RegFormal = TestLatticeKey(&A, IPOGrouping::Register);
+      auto RegActual =
+          TestLatticeKey(CS.getArgument(A.getArgNo()), IPOGrouping::Register);
+      ChangedValues[RegFormal] =
+          MergeValues(SS.getValueState(RegFormal), SS.getValueState(RegActual));
+    }
+    auto RetF = TestLatticeKey(F, IPOGrouping::Return);
+    ChangedValues[RegI] =
+        MergeValues(SS.getValueState(RegI), SS.getValueState(RetF));
+  }
+
+  /// Handle return instructions. The function's return state is the merge of
+  /// the returned value state and the function's current return state.
+  void visitReturn(ReturnInst &I,
+                   DenseMap<TestLatticeKey, TestLatticeVal> &ChangedValues,
+                   SparseSolver<TestLatticeKey, TestLatticeVal> &SS) {
+    Function *F = I.getParent()->getParent();
+    if (F->getReturnType()->isVoidTy())
+      return;
+    auto RegR = TestLatticeKey(I.getReturnValue(), IPOGrouping::Register);
+    auto RetF = TestLatticeKey(F, IPOGrouping::Return);
+    ChangedValues[RetF] =
+        MergeValues(SS.getValueState(RegR), SS.getValueState(RetF));
+  }
+
+  /// Handle store instructions. If the pointer operand of the store is a
+  /// global variable, we attempt to track the value. The global variable state
+  /// is the merge of the stored value state with the current global variable
+  /// state.
+  void visitStore(StoreInst &I,
+                  DenseMap<TestLatticeKey, TestLatticeVal> &ChangedValues,
+                  SparseSolver<TestLatticeKey, TestLatticeVal> &SS) {
+    auto *GV = dyn_cast<GlobalVariable>(I.getPointerOperand());
+    if (!GV)
+      return;
+    auto RegVal = TestLatticeKey(I.getValueOperand(), IPOGrouping::Register);
+    auto MemPtr = TestLatticeKey(GV, IPOGrouping::Memory);
+    ChangedValues[MemPtr] =
+        MergeValues(SS.getValueState(RegVal), SS.getValueState(MemPtr));
+  }
+
+  /// Handle all other instructions. All other instructions are marked
+  /// overdefined.
+  void visitInst(Instruction &I,
+                 DenseMap<TestLatticeKey, TestLatticeVal> &ChangedValues,
+                 SparseSolver<TestLatticeKey, TestLatticeVal> &SS) {
+    auto RegI = TestLatticeKey(&I, IPOGrouping::Register);
+    ChangedValues[RegI] = getOverdefinedVal();
+  }
+};
+
+/// This class defines the common data used for all of the tests. The tests
+/// should add code to the module and then run the solver.
+class SparsePropagationTest : public testing::Test {
+protected:
+  LLVMContext Context;
+  Module M;
+  IRBuilder<> Builder;
+  TestLatticeFunc Lattice;
+  SparseSolver<TestLatticeKey, TestLatticeVal> Solver;
+
+public:
+  SparsePropagationTest()
+      : M("", Context), Builder(Context), Solver(&Lattice) {}
+};
+} // namespace
+
+/// Test that we mark discovered functions executable.
+///
+/// define internal void @f() {
+///   call void @g()
+///   ret void
+/// }
+///
+/// define internal void @g() {
+///   call void @f()
+///   ret void
+/// }
+///
+/// For this test, we initially mark "f" executable, and the solver discovers
+/// "g" because of the call in "f". The mutually recursive call in "g" also
+/// tests that we don't add a block to the basic block work list if it is
+/// already executable. Doing so would put the solver into an infinite loop.
+TEST_F(SparsePropagationTest, MarkBlockExecutable) {
+  Function *F = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "f", &M);
+  Function *G = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "g", &M);
+  BasicBlock *FEntry = BasicBlock::Create(Context, "", F);
+  BasicBlock *GEntry = BasicBlock::Create(Context, "", G);
+  Builder.SetInsertPoint(FEntry);
+  Builder.CreateCall(G);
+  Builder.CreateRetVoid();
+  Builder.SetInsertPoint(GEntry);
+  Builder.CreateCall(F);
+  Builder.CreateRetVoid();
+
+  Solver.MarkBlockExecutable(FEntry);
+  Solver.Solve();
+
+  EXPECT_TRUE(Solver.isBlockExecutable(GEntry));
+}
+
+/// Test that we propagate information through global variables.
+///
+/// @gv = internal global i64
+///
+/// define internal void @f() {
+///   store i64 1, i64* @gv
+///   ret void
+/// }
+///
+/// define internal void @g() {
+///   store i64 1, i64* @gv
+///   ret void
+/// }
+///
+/// For this test, we initially mark both "f" and "g" executable, and the
+/// solver computes the lattice state of the global variable as constant.
+TEST_F(SparsePropagationTest, GlobalVariableConstant) {
+  Function *F = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "f", &M);
+  Function *G = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "g", &M);
+  GlobalVariable *GV =
+      new GlobalVariable(M, Builder.getInt64Ty(), false,
+                         GlobalValue::InternalLinkage, nullptr, "gv");
+  BasicBlock *FEntry = BasicBlock::Create(Context, "", F);
+  BasicBlock *GEntry = BasicBlock::Create(Context, "", G);
+  Builder.SetInsertPoint(FEntry);
+  Builder.CreateStore(Builder.getInt64(1), GV);
+  Builder.CreateRetVoid();
+  Builder.SetInsertPoint(GEntry);
+  Builder.CreateStore(Builder.getInt64(1), GV);
+  Builder.CreateRetVoid();
+
+  Solver.MarkBlockExecutable(FEntry);
+  Solver.MarkBlockExecutable(GEntry);
+  Solver.Solve();
+
+  auto MemGV = TestLatticeKey(GV, IPOGrouping::Memory);
+  EXPECT_TRUE(Solver.getExistingValueState(MemGV).isConstant());
+}
+
+/// Test that we propagate information through global variables.
+///
+/// @gv = internal global i64
+///
+/// define internal void @f() {
+///   store i64 0, i64* @gv
+///   ret void
+/// }
+///
+/// define internal void @g() {
+///   store i64 1, i64* @gv
+///   ret void
+/// }
+///
+/// For this test, we initially mark both "f" and "g" executable, and the
+/// solver computes the lattice state of the global variable as overdefined.
+TEST_F(SparsePropagationTest, GlobalVariableOverDefined) {
+  Function *F = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "f", &M);
+  Function *G = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "g", &M);
+  GlobalVariable *GV =
+      new GlobalVariable(M, Builder.getInt64Ty(), false,
+                         GlobalValue::InternalLinkage, nullptr, "gv");
+  BasicBlock *FEntry = BasicBlock::Create(Context, "", F);
+  BasicBlock *GEntry = BasicBlock::Create(Context, "", G);
+  Builder.SetInsertPoint(FEntry);
+  Builder.CreateStore(Builder.getInt64(0), GV);
+  Builder.CreateRetVoid();
+  Builder.SetInsertPoint(GEntry);
+  Builder.CreateStore(Builder.getInt64(1), GV);
+  Builder.CreateRetVoid();
+
+  Solver.MarkBlockExecutable(FEntry);
+  Solver.MarkBlockExecutable(GEntry);
+  Solver.Solve();
+
+  auto MemGV = TestLatticeKey(GV, IPOGrouping::Memory);
+  EXPECT_TRUE(Solver.getExistingValueState(MemGV).isOverdefined());
+}
+
+/// Test that we propagate information through function returns.
+///
+/// define internal i64 @f(i1* %cond) {
+/// if:
+///   %0 = load i1, i1* %cond
+///   br i1 %0, label %then, label %else
+///
+/// then:
+///   ret i64 1
+///
+/// else:
+///   ret i64 1
+/// }
+///
+/// For this test, we initially mark "f" executable, and the solver computes
+/// the return value of the function as constant.
+TEST_F(SparsePropagationTest, FunctionDefined) {
+  Function *F =
+      Function::Create(FunctionType::get(Builder.getInt64Ty(),
+                                         {Type::getInt1PtrTy(Context)}, false),
+                       GlobalValue::InternalLinkage, "f", &M);
+  BasicBlock *If = BasicBlock::Create(Context, "if", F);
+  BasicBlock *Then = BasicBlock::Create(Context, "then", F);
+  BasicBlock *Else = BasicBlock::Create(Context, "else", F);
+  F->arg_begin()->setName("cond");
+  Builder.SetInsertPoint(If);
+  LoadInst *Cond = Builder.CreateLoad(F->arg_begin());
+  Builder.CreateCondBr(Cond, Then, Else);
+  Builder.SetInsertPoint(Then);
+  Builder.CreateRet(Builder.getInt64(1));
+  Builder.SetInsertPoint(Else);
+  Builder.CreateRet(Builder.getInt64(1));
+
+  Solver.MarkBlockExecutable(If);
+  Solver.Solve();
+
+  auto RetF = TestLatticeKey(F, IPOGrouping::Return);
+  EXPECT_TRUE(Solver.getExistingValueState(RetF).isConstant());
+}
+
+/// Test that we propagate information through function returns.
+///
+/// define internal i64 @f(i1* %cond) {
+/// if:
+///   %0 = load i1, i1* %cond
+///   br i1 %0, label %then, label %else
+///
+/// then:
+///   ret i64 0
+///
+/// else:
+///   ret i64 1
+/// }
+///
+/// For this test, we initially mark "f" executable, and the solver computes
+/// the return value of the function as overdefined.
+TEST_F(SparsePropagationTest, FunctionOverDefined) {
+  Function *F =
+      Function::Create(FunctionType::get(Builder.getInt64Ty(),
+                                         {Type::getInt1PtrTy(Context)}, false),
+                       GlobalValue::InternalLinkage, "f", &M);
+  BasicBlock *If = BasicBlock::Create(Context, "if", F);
+  BasicBlock *Then = BasicBlock::Create(Context, "then", F);
+  BasicBlock *Else = BasicBlock::Create(Context, "else", F);
+  F->arg_begin()->setName("cond");
+  Builder.SetInsertPoint(If);
+  LoadInst *Cond = Builder.CreateLoad(F->arg_begin());
+  Builder.CreateCondBr(Cond, Then, Else);
+  Builder.SetInsertPoint(Then);
+  Builder.CreateRet(Builder.getInt64(0));
+  Builder.SetInsertPoint(Else);
+  Builder.CreateRet(Builder.getInt64(1));
+
+  Solver.MarkBlockExecutable(If);
+  Solver.Solve();
+
+  auto RetF = TestLatticeKey(F, IPOGrouping::Return);
+  EXPECT_TRUE(Solver.getExistingValueState(RetF).isOverdefined());
+}
+
+/// Test that we propagate information through arguments.
+///
+/// define internal void @f() {
+///   call void @g(i64 0, i64 1)
+///   call void @g(i64 1, i64 1)
+///   ret void
+/// }
+///
+/// define internal void @g(i64 %a, i64 %b) {
+///   ret void
+/// }
+///
+/// For this test, we initially mark "f" executable, and the solver discovers
+/// "g" because of the calls in "f". The solver computes the state of argument
+/// "a" as overdefined and the state of "b" as constant.
+///
+/// In addition, this test demonstrates that ComputeInstructionState can alter
+/// the state of multiple lattice values, in addition to the one associated
+/// with the instruction definition. Each call instruction in this test updates
+/// the state of arguments "a" and "b".
+TEST_F(SparsePropagationTest, ComputeInstructionState) {
+  Function *F = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "f", &M);
+  Function *G = Function::Create(
+      FunctionType::get(Builder.getVoidTy(),
+                        {Builder.getInt64Ty(), Builder.getInt64Ty()}, false),
+      GlobalValue::InternalLinkage, "g", &M);
+  Argument *A = G->arg_begin();
+  Argument *B = std::next(G->arg_begin());
+  A->setName("a");
+  B->setName("b");
+  BasicBlock *FEntry = BasicBlock::Create(Context, "", F);
+  BasicBlock *GEntry = BasicBlock::Create(Context, "", G);
+  Builder.SetInsertPoint(FEntry);
+  Builder.CreateCall(G, {Builder.getInt64(0), Builder.getInt64(1)});
+  Builder.CreateCall(G, {Builder.getInt64(1), Builder.getInt64(1)});
+  Builder.CreateRetVoid();
+  Builder.SetInsertPoint(GEntry);
+  Builder.CreateRetVoid();
+
+  Solver.MarkBlockExecutable(FEntry);
+  Solver.Solve();
+
+  auto RegA = TestLatticeKey(A, IPOGrouping::Register);
+  auto RegB = TestLatticeKey(B, IPOGrouping::Register);
+  EXPECT_TRUE(Solver.getExistingValueState(RegA).isOverdefined());
+  EXPECT_TRUE(Solver.getExistingValueState(RegB).isConstant());
+}
+
+/// Test that we can handle exceptional terminator instructions.
+///
+/// declare internal void @p()
+///
+/// declare internal void @g()
+///
+/// define internal void @f() personality i8* bitcast (void ()* @p to i8*) {
+/// entry:
+///   invoke void @g()
+///           to label %exit unwind label %catch.pad
+///
+/// catch.pad:
+///   %0 = catchswitch within none [label %catch.body] unwind to caller
+///
+/// catch.body:
+///   %1 = catchpad within %0 []
+///   catchret from %1 to label %exit
+///
+/// exit:
+///   ret void
+/// }
+///
+/// For this test, we initially mark the entry block executable. The solver
+/// then discovers the rest of the blocks in the function are executable.
+TEST_F(SparsePropagationTest, ExceptionalTerminatorInsts) {
+  Function *P = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "p", &M);
+  Function *G = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "g", &M);
+  Function *F = Function::Create(FunctionType::get(Builder.getVoidTy(), false),
+                                 GlobalValue::InternalLinkage, "f", &M);
+  Constant *C =
+      ConstantExpr::getCast(Instruction::BitCast, P, Builder.getInt8PtrTy());
+  F->setPersonalityFn(C);
+  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
+  BasicBlock *Pad = BasicBlock::Create(Context, "catch.pad", F);
+  BasicBlock *Body = BasicBlock::Create(Context, "catch.body", F);
+  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);
+  Builder.SetInsertPoint(Entry);
+  Builder.CreateInvoke(G, Exit, Pad);
+  Builder.SetInsertPoint(Pad);
+  CatchSwitchInst *CatchSwitch =
+      Builder.CreateCatchSwitch(ConstantTokenNone::get(Context), nullptr, 1);
+  CatchSwitch->addHandler(Body);
+  Builder.SetInsertPoint(Body);
+  CatchPadInst *CatchPad = Builder.CreateCatchPad(CatchSwitch, {});
+  Builder.CreateCatchRet(CatchPad, Exit);
+  Builder.SetInsertPoint(Exit);
+  Builder.CreateRetVoid();
+
+  Solver.MarkBlockExecutable(Entry);
+  Solver.Solve();
+
+  EXPECT_TRUE(Solver.isBlockExecutable(Pad));
+  EXPECT_TRUE(Solver.isBlockExecutable(Body));
+  EXPECT_TRUE(Solver.isBlockExecutable(Exit));
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/TBAATest.cpp b/src/llvm-project/llvm/unittests/Analysis/TBAATest.cpp
new file mode 100644
index 0000000..f3f05d8
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/TBAATest.cpp
@@ -0,0 +1,91 @@
+//===--- TBAATest.cpp - Mixed TBAA unit tests -----------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/AliasAnalysisEvaluator.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/CommandLine.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+class TBAATest : public testing::Test {
+protected:
+  TBAATest() : M("TBAATest", C), MD(C) {}
+
+  LLVMContext C;
+  Module M;
+  MDBuilder MD;
+};
+
+static StoreInst *getFunctionWithSingleStore(Module *M, StringRef Name) {
+  auto &C = M->getContext();
+  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), {});
+  auto *F = cast<Function>(M->getOrInsertFunction(Name, FTy));
+  auto *BB = BasicBlock::Create(C, "entry", F);
+  auto *IntType = Type::getInt32Ty(C);
+  auto *PtrType = Type::getInt32PtrTy(C);
+  auto *SI = new StoreInst(ConstantInt::get(IntType, 42),
+                           ConstantPointerNull::get(PtrType), BB);
+  ReturnInst::Create(C, nullptr, BB);
+
+  return SI;
+}
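+
+// For reference, the helper above builds roughly the following IR (the
+// function name is supplied by the caller):
+//
+//   define void @name() {
+//   entry:
+//     store i32 42, i32* null
+//     ret void
+//   }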
+
+TEST_F(TBAATest, checkVerifierBehaviorForOldTBAA) {
+  auto *SI = getFunctionWithSingleStore(&M, "f1");
+  auto *F = SI->getFunction();
+
+  // Build old-style TBAA metadata directly to avoid the auto-upgrade logic.
+  auto *RootMD = MD.createTBAARoot("Simple C/C++ TBAA");
+  auto *MD1 = MD.createTBAANode("omnipotent char", RootMD);
+  auto *MD2 = MD.createTBAANode("int", MD1);
+  SI->setMetadata(LLVMContext::MD_tbaa, MD2);
+
+  SmallVector<char, 0> ErrorMsg;
+  raw_svector_ostream Outs(ErrorMsg);
+
+  StringRef ExpectedFailureMsg(
+      "Old-style TBAA is no longer allowed, use struct-path TBAA instead");
+
+  EXPECT_TRUE(verifyFunction(*F, &Outs));
+  EXPECT_TRUE(StringRef(ErrorMsg.begin(), ErrorMsg.size())
+                  .startswith(ExpectedFailureMsg));
+}
+
+TEST_F(TBAATest, checkTBAAMerging) {
+  auto *SI = getFunctionWithSingleStore(&M, "f2");
+  auto *F = SI->getFunction();
+
+  auto *RootMD = MD.createTBAARoot("tbaa-root");
+  auto *MD1 = MD.createTBAANode("scalar-a", RootMD);
+  auto *StructTag1 = MD.createTBAAStructTagNode(MD1, MD1, 0);
+  auto *MD2 = MD.createTBAANode("scalar-b", RootMD);
+  auto *StructTag2 = MD.createTBAAStructTagNode(MD2, MD2, 0);
+
+  auto *GenericMD = MDNode::getMostGenericTBAA(StructTag1, StructTag2);
+
+  EXPECT_EQ(GenericMD, nullptr);
+
+  // Despite GenericMD being nullptr, we expect the setMetadata call to be well
+  // defined and produce a well-formed function.
+  SI->setMetadata(LLVMContext::MD_tbaa, GenericMD);
+
+  EXPECT_TRUE(!verifyFunction(*F));
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/src/llvm-project/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp b/src/llvm-project/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
new file mode 100644
index 0000000..482d9d8
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/TargetLibraryInfoTest.cpp
@@ -0,0 +1,561 @@
+//===--- TargetLibraryInfoTest.cpp - TLI/LibFunc unit tests ---------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+
+class TargetLibraryInfoTest : public testing::Test {
+protected:
+  LLVMContext Context;
+  TargetLibraryInfoImpl TLII;
+  TargetLibraryInfo TLI;
+
+  std::unique_ptr<Module> M;
+
+  TargetLibraryInfoTest() : TLI(TLII) {}
+
+  void parseAssembly(const char *Assembly) {
+    SMDiagnostic Error;
+    M = parseAssemblyString(Assembly, Error, Context);
+
+    std::string errMsg;
+    raw_string_ostream os(errMsg);
+    Error.print("", os);
+
+    if (!M)
+      report_fatal_error(os.str());
+  }
+
+  ::testing::AssertionResult isLibFunc(const Function *FDecl,
+                                       LibFunc ExpectedLF) {
+    StringRef ExpectedLFName = TLI.getName(ExpectedLF);
+
+    if (!FDecl)
+      return ::testing::AssertionFailure() << ExpectedLFName << " not found";
+
+    LibFunc F;
+    if (!TLI.getLibFunc(*FDecl, F))
+      return ::testing::AssertionFailure() << ExpectedLFName << " invalid";
+
+    return ::testing::AssertionSuccess() << ExpectedLFName << " is LibFunc";
+  }
+};
+
+} // end anonymous namespace
+
+// Check that we don't accept egregiously incorrect prototypes.
+TEST_F(TargetLibraryInfoTest, InvalidProto) {
+  parseAssembly("%foo = type { %foo }\n");
+
+  auto *StructTy = M->getTypeByName("foo");
+  auto *InvalidFTy = FunctionType::get(StructTy, /*isVarArg=*/false);
+
+  for (unsigned FI = 0; FI != LibFunc::NumLibFuncs; ++FI) {
+    LibFunc LF = (LibFunc)FI;
+    auto *F = cast<Function>(
+        M->getOrInsertFunction(TLI.getName(LF), InvalidFTy));
+    EXPECT_FALSE(isLibFunc(F, LF));
+  }
+}
+
+// Check that we do accept known-correct prototypes.
+TEST_F(TargetLibraryInfoTest, ValidProto) {
+  parseAssembly(
+      // These functions use a 64-bit size_t; use the appropriate datalayout.
+      "target datalayout = \"p:64:64:64\"\n"
+
+      // Struct pointers are replaced with an opaque pointer.
+      "%struct = type opaque\n"
+
+      // These functions were extracted as-is from the OS X headers.
+      "declare double @__cospi(double)\n"
+      "declare float @__cospif(float)\n"
+      "declare { double, double } @__sincospi_stret(double)\n"
+      "declare <2 x float> @__sincospif_stret(float)\n"
+      "declare double @__sinpi(double)\n"
+      "declare float @__sinpif(float)\n"
+      "declare i32 @abs(i32)\n"
+      "declare i32 @access(i8*, i32)\n"
+      "declare double @acos(double)\n"
+      "declare float @acosf(float)\n"
+      "declare double @acosh(double)\n"
+      "declare float @acoshf(float)\n"
+      "declare x86_fp80 @acoshl(x86_fp80)\n"
+      "declare x86_fp80 @acosl(x86_fp80)\n"
+      "declare double @asin(double)\n"
+      "declare float @asinf(float)\n"
+      "declare double @asinh(double)\n"
+      "declare float @asinhf(float)\n"
+      "declare x86_fp80 @asinhl(x86_fp80)\n"
+      "declare x86_fp80 @asinl(x86_fp80)\n"
+      "declare double @atan(double)\n"
+      "declare double @atan2(double, double)\n"
+      "declare float @atan2f(float, float)\n"
+      "declare x86_fp80 @atan2l(x86_fp80, x86_fp80)\n"
+      "declare float @atanf(float)\n"
+      "declare double @atanh(double)\n"
+      "declare float @atanhf(float)\n"
+      "declare x86_fp80 @atanhl(x86_fp80)\n"
+      "declare x86_fp80 @atanl(x86_fp80)\n"
+      "declare double @atof(i8*)\n"
+      "declare i32 @atoi(i8*)\n"
+      "declare i64 @atol(i8*)\n"
+      "declare i64 @atoll(i8*)\n"
+      "declare i32 @bcmp(i8*, i8*, i64)\n"
+      "declare void @bcopy(i8*, i8*, i64)\n"
+      "declare void @bzero(i8*, i64)\n"
+      "declare i8* @calloc(i64, i64)\n"
+      "declare double @cbrt(double)\n"
+      "declare float @cbrtf(float)\n"
+      "declare x86_fp80 @cbrtl(x86_fp80)\n"
+      "declare double @ceil(double)\n"
+      "declare float @ceilf(float)\n"
+      "declare x86_fp80 @ceill(x86_fp80)\n"
+      "declare i32 @chown(i8*, i32, i32)\n"
+      "declare void @clearerr(%struct*)\n"
+      "declare double @copysign(double, double)\n"
+      "declare float @copysignf(float, float)\n"
+      "declare x86_fp80 @copysignl(x86_fp80, x86_fp80)\n"
+      "declare double @cabs([2 x double])\n"
+      "declare float @cabsf([2 x float])\n"
+      "declare x86_fp80 @cabsl([2 x x86_fp80])\n"
+      "declare double @cos(double)\n"
+      "declare float @cosf(float)\n"
+      "declare double @cosh(double)\n"
+      "declare float @coshf(float)\n"
+      "declare x86_fp80 @coshl(x86_fp80)\n"
+      "declare x86_fp80 @cosl(x86_fp80)\n"
+      "declare i8* @ctermid(i8*)\n"
+      "declare double @exp(double)\n"
+      "declare double @exp2(double)\n"
+      "declare float @exp2f(float)\n"
+      "declare x86_fp80 @exp2l(x86_fp80)\n"
+      "declare float @expf(float)\n"
+      "declare x86_fp80 @expl(x86_fp80)\n"
+      "declare double @expm1(double)\n"
+      "declare float @expm1f(float)\n"
+      "declare x86_fp80 @expm1l(x86_fp80)\n"
+      "declare double @fabs(double)\n"
+      "declare float @fabsf(float)\n"
+      "declare x86_fp80 @fabsl(x86_fp80)\n"
+      "declare i32 @fclose(%struct*)\n"
+      "declare i32 @feof(%struct*)\n"
+      "declare i32 @ferror(%struct*)\n"
+      "declare i32 @fflush(%struct*)\n"
+      "declare i32 @ffs(i32)\n"
+      "declare i32 @ffsl(i64)\n"
+      "declare i32 @ffsll(i64)\n"
+      "declare i32 @fgetc(%struct*)\n"
+      "declare i32 @fgetc_unlocked(%struct*)\n"
+      "declare i32 @fgetpos(%struct*, i64*)\n"
+      "declare i8* @fgets(i8*, i32, %struct*)\n"
+      "declare i8* @fgets_unlocked(i8*, i32, %struct*)\n"
+      "declare i32 @fileno(%struct*)\n"
+      "declare void @flockfile(%struct*)\n"
+      "declare double @floor(double)\n"
+      "declare float @floorf(float)\n"
+      "declare x86_fp80 @floorl(x86_fp80)\n"
+      "declare i32 @fls(i32)\n"
+      "declare i32 @flsl(i64)\n"
+      "declare i32 @flsll(i64)\n"
+      "declare double @fmax(double, double)\n"
+      "declare float @fmaxf(float, float)\n"
+      "declare x86_fp80 @fmaxl(x86_fp80, x86_fp80)\n"
+      "declare double @fmin(double, double)\n"
+      "declare float @fminf(float, float)\n"
+      "declare x86_fp80 @fminl(x86_fp80, x86_fp80)\n"
+      "declare double @fmod(double, double)\n"
+      "declare float @fmodf(float, float)\n"
+      "declare x86_fp80 @fmodl(x86_fp80, x86_fp80)\n"
+      "declare i32 @fprintf(%struct*, i8*, ...)\n"
+      "declare i32 @fputc(i32, %struct*)\n"
+      "declare i32 @fputc_unlocked(i32, %struct*)\n"
+      "declare i64 @fread(i8*, i64, i64, %struct*)\n"
+      "declare i64 @fread_unlocked(i8*, i64, i64, %struct*)\n"
+      "declare void @free(i8*)\n"
+      "declare double @frexp(double, i32*)\n"
+      "declare float @frexpf(float, i32*)\n"
+      "declare x86_fp80 @frexpl(x86_fp80, i32*)\n"
+      "declare i32 @fscanf(%struct*, i8*, ...)\n"
+      "declare i32 @fseek(%struct*, i64, i32)\n"
+      "declare i32 @fseeko(%struct*, i64, i32)\n"
+      "declare i32 @fsetpos(%struct*, i64*)\n"
+      "declare i32 @fstatvfs(i32, %struct*)\n"
+      "declare i64 @ftell(%struct*)\n"
+      "declare i64 @ftello(%struct*)\n"
+      "declare i32 @ftrylockfile(%struct*)\n"
+      "declare void @funlockfile(%struct*)\n"
+      "declare i32 @getc(%struct*)\n"
+      "declare i32 @getc_unlocked(%struct*)\n"
+      "declare i32 @getchar()\n"
+      "declare i32 @getchar_unlocked()\n"
+      "declare i8* @getenv(i8*)\n"
+      "declare i32 @getitimer(i32, %struct*)\n"
+      "declare i32 @getlogin_r(i8*, i64)\n"
+      "declare %struct* @getpwnam(i8*)\n"
+      "declare i8* @gets(i8*)\n"
+      "declare i32 @gettimeofday(%struct*, i8*)\n"
+      "declare i32 @_Z7isasciii(i32)\n"
+      "declare i32 @_Z7isdigiti(i32)\n"
+      "declare i64 @labs(i64)\n"
+      "declare double @ldexp(double, i32)\n"
+      "declare float @ldexpf(float, i32)\n"
+      "declare x86_fp80 @ldexpl(x86_fp80, i32)\n"
+      "declare i64 @llabs(i64)\n"
+      "declare double @log(double)\n"
+      "declare double @log10(double)\n"
+      "declare float @log10f(float)\n"
+      "declare x86_fp80 @log10l(x86_fp80)\n"
+      "declare double @log1p(double)\n"
+      "declare float @log1pf(float)\n"
+      "declare x86_fp80 @log1pl(x86_fp80)\n"
+      "declare double @log2(double)\n"
+      "declare float @log2f(float)\n"
+      "declare x86_fp80 @log2l(x86_fp80)\n"
+      "declare double @logb(double)\n"
+      "declare float @logbf(float)\n"
+      "declare x86_fp80 @logbl(x86_fp80)\n"
+      "declare float @logf(float)\n"
+      "declare x86_fp80 @logl(x86_fp80)\n"
+      "declare i8* @malloc(i64)\n"
+      "declare i8* @memccpy(i8*, i8*, i32, i64)\n"
+      "declare i8* @memchr(i8*, i32, i64)\n"
+      "declare i32 @memcmp(i8*, i8*, i64)\n"
+      "declare i8* @memcpy(i8*, i8*, i64)\n"
+      "declare i8* @memmove(i8*, i8*, i64)\n"
+      "declare i8* @memset(i8*, i32, i64)\n"
+      "declare void @memset_pattern16(i8*, i8*, i64)\n"
+      "declare i32 @mkdir(i8*, i16)\n"
+      "declare double @modf(double, double*)\n"
+      "declare float @modff(float, float*)\n"
+      "declare x86_fp80 @modfl(x86_fp80, x86_fp80*)\n"
+      "declare double @nearbyint(double)\n"
+      "declare float @nearbyintf(float)\n"
+      "declare x86_fp80 @nearbyintl(x86_fp80)\n"
+      "declare i32 @pclose(%struct*)\n"
+      "declare void @perror(i8*)\n"
+      "declare i32 @posix_memalign(i8**, i64, i64)\n"
+      "declare double @pow(double, double)\n"
+      "declare float @powf(float, float)\n"
+      "declare x86_fp80 @powl(x86_fp80, x86_fp80)\n"
+      "declare i32 @printf(i8*, ...)\n"
+      "declare i32 @putc(i32, %struct*)\n"
+      "declare i32 @putc_unlocked(i32, %struct*)\n"
+      "declare i32 @putchar(i32)\n"
+      "declare i32 @putchar_unlocked(i32)\n"
+      "declare i32 @puts(i8*)\n"
+      "declare void @qsort(i8*, i64, i64, i32 (i8*, i8*)*)\n"
+      "declare i64 @readlink(i8*, i8*, i64)\n"
+      "declare i8* @realloc(i8*, i64)\n"
+      "declare i8* @reallocf(i8*, i64)\n"
+      "declare i32 @remove(i8*)\n"
+      "declare i32 @rename(i8*, i8*)\n"
+      "declare void @rewind(%struct*)\n"
+      "declare double @rint(double)\n"
+      "declare float @rintf(float)\n"
+      "declare x86_fp80 @rintl(x86_fp80)\n"
+      "declare i32 @rmdir(i8*)\n"
+      "declare double @round(double)\n"
+      "declare float @roundf(float)\n"
+      "declare x86_fp80 @roundl(x86_fp80)\n"
+      "declare i32 @scanf(i8*, ...)\n"
+      "declare void @setbuf(%struct*, i8*)\n"
+      "declare i32 @setitimer(i32, %struct*, %struct*)\n"
+      "declare i32 @setvbuf(%struct*, i8*, i32, i64)\n"
+      "declare double @sin(double)\n"
+      "declare float @sinf(float)\n"
+      "declare double @sinh(double)\n"
+      "declare float @sinhf(float)\n"
+      "declare x86_fp80 @sinhl(x86_fp80)\n"
+      "declare x86_fp80 @sinl(x86_fp80)\n"
+      "declare i32 @snprintf(i8*, i64, i8*, ...)\n"
+      "declare i32 @sprintf(i8*, i8*, ...)\n"
+      "declare double @sqrt(double)\n"
+      "declare float @sqrtf(float)\n"
+      "declare x86_fp80 @sqrtl(x86_fp80)\n"
+      "declare i32 @sscanf(i8*, i8*, ...)\n"
+      "declare i32 @statvfs(i8*, %struct*)\n"
+      "declare i8* @stpcpy(i8*, i8*)\n"
+      "declare i8* @stpncpy(i8*, i8*, i64)\n"
+      "declare i32 @strcasecmp(i8*, i8*)\n"
+      "declare i8* @strcat(i8*, i8*)\n"
+      "declare i8* @strchr(i8*, i32)\n"
+      "declare i32 @strcmp(i8*, i8*)\n"
+      "declare i32 @strcoll(i8*, i8*)\n"
+      "declare i8* @strcpy(i8*, i8*)\n"
+      "declare i64 @strcspn(i8*, i8*)\n"
+      "declare i8* @strdup(i8*)\n"
+      "declare i64 @strlen(i8*)\n"
+      "declare i32 @strncasecmp(i8*, i8*, i64)\n"
+      "declare i8* @strncat(i8*, i8*, i64)\n"
+      "declare i32 @strncmp(i8*, i8*, i64)\n"
+      "declare i8* @strncpy(i8*, i8*, i64)\n"
+      "declare i8* @strndup(i8*, i64)\n"
+      "declare i64 @strnlen(i8*, i64)\n"
+      "declare i8* @strpbrk(i8*, i8*)\n"
+      "declare i8* @strrchr(i8*, i32)\n"
+      "declare i64 @strspn(i8*, i8*)\n"
+      "declare i8* @strstr(i8*, i8*)\n"
+      "declare i8* @strtok(i8*, i8*)\n"
+      "declare i8* @strtok_r(i8*, i8*, i8**)\n"
+      "declare i64 @strtol(i8*, i8**, i32)\n"
+      "declare x86_fp80 @strtold(i8*, i8**)\n"
+      "declare i64 @strtoll(i8*, i8**, i32)\n"
+      "declare i64 @strtoul(i8*, i8**, i32)\n"
+      "declare i64 @strtoull(i8*, i8**, i32)\n"
+      "declare i64 @strxfrm(i8*, i8*, i64)\n"
+      "declare double @tan(double)\n"
+      "declare float @tanf(float)\n"
+      "declare double @tanh(double)\n"
+      "declare float @tanhf(float)\n"
+      "declare x86_fp80 @tanhl(x86_fp80)\n"
+      "declare x86_fp80 @tanl(x86_fp80)\n"
+      "declare i64 @times(%struct*)\n"
+      "declare %struct* @tmpfile()\n"
+      "declare i32 @_Z7toasciii(i32)\n"
+      "declare double @trunc(double)\n"
+      "declare float @truncf(float)\n"
+      "declare x86_fp80 @truncl(x86_fp80)\n"
+      "declare i32 @uname(%struct*)\n"
+      "declare i32 @ungetc(i32, %struct*)\n"
+      "declare i32 @unlink(i8*)\n"
+      "declare i32 @utime(i8*, %struct*)\n"
+      "declare i32 @utimes(i8*, %struct*)\n"
+      "declare i8* @valloc(i64)\n"
+      "declare i32 @vfprintf(%struct*, i8*, %struct*)\n"
+      "declare i32 @vfscanf(%struct*, i8*, %struct*)\n"
+      "declare i32 @vprintf(i8*, %struct*)\n"
+      "declare i32 @vscanf(i8*, %struct*)\n"
+      "declare i32 @vsnprintf(i8*, i64, i8*, %struct*)\n"
+      "declare i32 @vsprintf(i8*, i8*, %struct*)\n"
+      "declare i32 @vsscanf(i8*, i8*, %struct*)\n"
+      "declare i64 @wcslen(i32*)\n"
+      "declare i32 @fork()\n"
+      "declare i32 @execl(i8*, i8*, ...)\n"
+      "declare i32 @execle(i8*, i8*, ...)\n"
+      "declare i32 @execlp(i8*, i8*, ...)\n"
+      "declare i32 @execv(i8*, i8**)\n"
+      "declare i32 @execvP(i8*, i8*, i8**)\n"
+      "declare i32 @execve(i8*, i8**, i8**)\n"
+      "declare i32 @execvp(i8*, i8**)\n"
+      "declare i32 @execvpe(i8*, i8**, i8**)\n"
+
+      // These functions were also extracted from the OS X headers, but they are
+      // available with a special name on darwin.
+      // This test uses the default TLI name instead.
+      "declare i32 @chmod(i8*, i16)\n"
+      "declare i32 @closedir(%struct*)\n"
+      "declare %struct* @fdopen(i32, i8*)\n"
+      "declare %struct* @fopen(i8*, i8*)\n"
+      "declare i32 @fputs(i8*, %struct*)\n"
+      "declare i32 @fputs_unlocked(i8*, %struct*)\n"
+      "declare i32 @fstat(i32, %struct*)\n"
+      "declare i64 @fwrite(i8*, i64, i64, %struct*)\n"
+      "declare i64 @fwrite_unlocked(i8*, i64, i64, %struct*)\n"
+      "declare i32 @lchown(i8*, i32, i32)\n"
+      "declare i32 @lstat(i8*, %struct*)\n"
+      "declare i64 @mktime(%struct*)\n"
+      "declare i32 @open(i8*, i32, ...)\n"
+      "declare %struct* @opendir(i8*)\n"
+      "declare %struct* @popen(i8*, i8*)\n"
+      "declare i64 @pread(i32, i8*, i64, i64)\n"
+      "declare i64 @pwrite(i32, i8*, i64, i64)\n"
+      "declare i64 @read(i32, i8*, i64)\n"
+      "declare i8* @realpath(i8*, i8*)\n"
+      "declare i32 @stat(i8*, %struct*)\n"
+      "declare double @strtod(i8*, i8**)\n"
+      "declare float @strtof(i8*, i8**)\n"
+      "declare i32 @system(i8*)\n"
+      "declare i32 @unsetenv(i8*)\n"
+      "declare i64 @write(i32, i8*, i64)\n"
+
+      // These functions are available on Linux but not Darwin; they only differ
+      // from their non-64 counterparts in the struct type.
+      // Use the same prototype as the non-64 variant.
+      "declare %struct* @fopen64(i8*, i8*)\n"
+      "declare i32 @fstat64(i32, %struct*)\n"
+      "declare i32 @fstatvfs64(i32, %struct*)\n"
+      "declare i32 @lstat64(i8*, %struct*)\n"
+      "declare i32 @open64(i8*, i32, ...)\n"
+      "declare i32 @stat64(i8*, %struct*)\n"
+      "declare i32 @statvfs64(i8*, %struct*)\n"
+      "declare %struct* @tmpfile64()\n"
+
+      // These functions are also -64 variants, but do differ in the type of the
+      // off_t (vs off64_t) parameter.  The non-64 variants declared above used
+      // a 64-bit off_t, so, in practice, they are also equivalent.
+      "declare i32 @fseeko64(%struct*, i64, i32)\n"
+      "declare i64 @ftello64(%struct*)\n"
+
+      "declare void @_ZdaPv(i8*)\n"
+      "declare void @_ZdaPvRKSt9nothrow_t(i8*, %struct*)\n"
+      "declare void @_ZdaPvSt11align_val_t(i8*, i64)\n"
+      "declare void @_ZdaPvSt11align_val_tRKSt9nothrow_t(i8*, i64, %struct*)\n"
+      "declare void @_ZdaPvj(i8*, i32)\n"
+      "declare void @_ZdaPvm(i8*, i64)\n"
+      "declare void @_ZdlPv(i8*)\n"
+      "declare void @_ZdlPvRKSt9nothrow_t(i8*, %struct*)\n"
+      "declare void @_ZdlPvSt11align_val_t(i8*, i64)\n"
+      "declare void @_ZdlPvSt11align_val_tRKSt9nothrow_t(i8*, i64, %struct*)\n"
+      "declare void @_ZdlPvj(i8*, i32)\n"
+      "declare void @_ZdlPvm(i8*, i64)\n"
+      "declare i8* @_Znaj(i32)\n"
+      "declare i8* @_ZnajRKSt9nothrow_t(i32, %struct*)\n"
+      "declare i8* @_ZnajSt11align_val_t(i32, i32)\n"
+      "declare i8* @_ZnajSt11align_val_tRKSt9nothrow_t(i32, i32, %struct*)\n"
+      "declare i8* @_Znam(i64)\n"
+      "declare i8* @_ZnamRKSt9nothrow_t(i64, %struct*)\n"
+      "declare i8* @_ZnamSt11align_val_t(i64, i64)\n"
+      "declare i8* @_ZnamSt11align_val_tRKSt9nothrow_t(i64, i64, %struct*)\n"
+      "declare i8* @_Znwj(i32)\n"
+      "declare i8* @_ZnwjRKSt9nothrow_t(i32, %struct*)\n"
+      "declare i8* @_ZnwjSt11align_val_t(i32, i32)\n"
+      "declare i8* @_ZnwjSt11align_val_tRKSt9nothrow_t(i32, i32, %struct*)\n"
+      "declare i8* @_Znwm(i64)\n"
+      "declare i8* @_ZnwmRKSt9nothrow_t(i64, %struct*)\n"
+      "declare i8* @_ZnwmSt11align_val_t(i64, i64)\n"
+      "declare i8* @_ZnwmSt11align_val_tRKSt9nothrow_t(i64, i64, %struct*)\n"
+
+      "declare void @\"??3@YAXPEAX@Z\"(i8*)\n"
+      "declare void @\"??3@YAXPEAXAEBUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
+      "declare void @\"??3@YAXPEAX_K@Z\"(i8*, i64)\n"
+      "declare void @\"??_V@YAXPEAX@Z\"(i8*)\n"
+      "declare void @\"??_V@YAXPEAXAEBUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
+      "declare void @\"??_V@YAXPEAX_K@Z\"(i8*, i64)\n"
+      "declare i8* @\"??2@YAPAXI@Z\"(i32)\n"
+      "declare i8* @\"??2@YAPAXIABUnothrow_t@std@@@Z\"(i32, %struct*)\n"
+      "declare i8* @\"??2@YAPEAX_K@Z\"(i64)\n"
+      "declare i8* @\"??2@YAPEAX_KAEBUnothrow_t@std@@@Z\"(i64, %struct*)\n"
+      "declare i8* @\"??_U@YAPAXI@Z\"(i32)\n"
+      "declare i8* @\"??_U@YAPAXIABUnothrow_t@std@@@Z\"(i32, %struct*)\n"
+      "declare i8* @\"??_U@YAPEAX_K@Z\"(i64)\n"
+      "declare i8* @\"??_U@YAPEAX_KAEBUnothrow_t@std@@@Z\"(i64, %struct*)\n"
+
+      "declare void @\"??3@YAXPAX@Z\"(i8*)\n"
+      "declare void @\"??3@YAXPAXABUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
+      "declare void @\"??3@YAXPAXI@Z\"(i8*, i32)\n"
+      "declare void @\"??_V@YAXPAX@Z\"(i8*)\n"
+      "declare void @\"??_V@YAXPAXABUnothrow_t@std@@@Z\"(i8*, %struct*)\n"
+      "declare void @\"??_V@YAXPAXI@Z\"(i8*, i32)\n"
+
+      // These other functions were derived from the .def C declaration.
+      "declare i32 @__cxa_atexit(void (i8*)*, i8*, i8*)\n"
+      "declare void @__cxa_guard_abort(%struct*)\n"
+      "declare i32 @__cxa_guard_acquire(%struct*)\n"
+      "declare void @__cxa_guard_release(%struct*)\n"
+
+      "declare i32 @__nvvm_reflect(i8*)\n"
+
+      "declare i8* @__memcpy_chk(i8*, i8*, i64, i64)\n"
+      "declare i8* @__memmove_chk(i8*, i8*, i64, i64)\n"
+      "declare i8* @__memset_chk(i8*, i32, i64, i64)\n"
+      "declare i8* @__stpcpy_chk(i8*, i8*, i64)\n"
+      "declare i8* @__stpncpy_chk(i8*, i8*, i64, i64)\n"
+      "declare i8* @__strcpy_chk(i8*, i8*, i64)\n"
+      "declare i8* @__strncpy_chk(i8*, i8*, i64, i64)\n"
+
+      "declare i8* @memalign(i64, i64)\n"
+      "declare i8* @mempcpy(i8*, i8*, i64)\n"
+      "declare i8* @memrchr(i8*, i32, i64)\n"
+
+      // These are similar to the FILE* fgetc/fputc.
+      "declare i32 @_IO_getc(%struct*)\n"
+      "declare i32 @_IO_putc(i32, %struct*)\n"
+
+      "declare i32 @__isoc99_scanf(i8*, ...)\n"
+      "declare i32 @__isoc99_sscanf(i8*, i8*, ...)\n"
+      "declare i8* @__strdup(i8*)\n"
+      "declare i8* @__strndup(i8*, i64)\n"
+      "declare i8* @__strtok_r(i8*, i8*, i8**)\n"
+
+      "declare double @__sqrt_finite(double)\n"
+      "declare float @__sqrtf_finite(float)\n"
+      "declare x86_fp80 @__sqrtl_finite(x86_fp80)\n"
+      "declare double @exp10(double)\n"
+      "declare float @exp10f(float)\n"
+      "declare x86_fp80 @exp10l(x86_fp80)\n"
+
+      // These printf variants have the same prototype as the non-'i' versions.
+      "declare i32 @fiprintf(%struct*, i8*, ...)\n"
+      "declare i32 @iprintf(i8*, ...)\n"
+      "declare i32 @siprintf(i8*, i8*, ...)\n"
+
+      "declare i32 @htonl(i32)\n"
+      "declare i16 @htons(i16)\n"
+      "declare i32 @ntohl(i32)\n"
+      "declare i16 @ntohs(i16)\n"
+
+      "declare i32 @isascii(i32)\n"
+      "declare i32 @isdigit(i32)\n"
+      "declare i32 @toascii(i32)\n"
+
+      // These functions were extracted from math-finite.h which provides
+      // functions similar to those in math.h, but optimized for handling
+      // finite values only.
+      "declare double @__acos_finite(double)\n"
+      "declare float @__acosf_finite(float)\n"
+      "declare x86_fp80 @__acosl_finite(x86_fp80)\n"
+      "declare double @__acosh_finite(double)\n"
+      "declare float @__acoshf_finite(float)\n"
+      "declare x86_fp80 @__acoshl_finite(x86_fp80)\n"
+      "declare double @__asin_finite(double)\n"
+      "declare float @__asinf_finite(float)\n"
+      "declare x86_fp80 @__asinl_finite(x86_fp80)\n"
+      "declare double @__atan2_finite(double, double)\n"
+      "declare float @__atan2f_finite(float, float)\n"
+      "declare x86_fp80 @__atan2l_finite(x86_fp80, x86_fp80)\n"
+      "declare double @__atanh_finite(double)\n"
+      "declare float @__atanhf_finite(float)\n"
+      "declare x86_fp80 @__atanhl_finite(x86_fp80)\n"
+      "declare double @__cosh_finite(double)\n"
+      "declare float @__coshf_finite(float)\n"
+      "declare x86_fp80 @__coshl_finite(x86_fp80)\n"
+      "declare double @__exp10_finite(double)\n"
+      "declare float @__exp10f_finite(float)\n"
+      "declare x86_fp80 @__exp10l_finite(x86_fp80)\n"
+      "declare double @__exp2_finite(double)\n"
+      "declare float @__exp2f_finite(float)\n"
+      "declare x86_fp80 @__exp2l_finite(x86_fp80)\n"
+      "declare double @__exp_finite(double)\n"
+      "declare float @__expf_finite(float)\n"
+      "declare x86_fp80 @__expl_finite(x86_fp80)\n"
+      "declare double @__log10_finite(double)\n"
+      "declare float @__log10f_finite(float)\n"
+      "declare x86_fp80 @__log10l_finite(x86_fp80)\n"
+      "declare double @__log2_finite(double)\n"
+      "declare float @__log2f_finite(float)\n"
+      "declare x86_fp80 @__log2l_finite(x86_fp80)\n"
+      "declare double @__log_finite(double)\n"
+      "declare float @__logf_finite(float)\n"
+      "declare x86_fp80 @__logl_finite(x86_fp80)\n"
+      "declare double @__pow_finite(double, double)\n"
+      "declare float @__powf_finite(float, float)\n"
+      "declare x86_fp80 @__powl_finite(x86_fp80, x86_fp80)\n"
+      "declare double @__sinh_finite(double)\n"
+      "declare float @__sinhf_finite(float)\n"
+      "declare x86_fp80 @__sinhl_finite(x86_fp80)\n"
+      );
+
+  for (unsigned FI = 0; FI != LibFunc::NumLibFuncs; ++FI) {
+    LibFunc LF = (LibFunc)FI;
+    // Make sure everything is available; we're not testing target defaults.
+    TLII.setAvailable(LF);
+    Function *F = M->getFunction(TLI.getName(LF));
+    EXPECT_TRUE(isLibFunc(F, LF));
+  }
+}
diff --git a/src/llvm-project/llvm/unittests/Analysis/UnrollAnalyzerTest.cpp b/src/llvm-project/llvm/unittests/Analysis/UnrollAnalyzerTest.cpp
new file mode 100644
index 0000000..937e69f
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/UnrollAnalyzerTest.cpp
@@ -0,0 +1,330 @@
+//===- UnrollAnalyzerTest.cpp - UnrollAnalyzer unit tests -----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/LoopUnrollAnalyzer.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Support/SourceMgr.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+namespace llvm {
+void initializeUnrollAnalyzerTestPass(PassRegistry &);
+
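+// After UnrollAnalyzerTest runs, SimplifiedValuesVector[i] maps each
+// instruction that folds to a constant on loop iteration i to that constant,
+// and TripCount holds the small constant trip count of the analyzed loop.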
+static SmallVector<DenseMap<Value *, Constant *>, 16> SimplifiedValuesVector;
+static unsigned TripCount = 0;
+
+namespace {
+struct UnrollAnalyzerTest : public FunctionPass {
+  static char ID;
+  bool runOnFunction(Function &F) override {
+    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
+
+    Function::iterator FI = F.begin();
+    FI++; // First basic block is entry - skip it.
+    BasicBlock *Header = &*FI++;
+    Loop *L = LI->getLoopFor(Header);
+    BasicBlock *Exiting = L->getExitingBlock();
+
+    SimplifiedValuesVector.clear();
+    TripCount = SE->getSmallConstantTripCount(L, Exiting);
+    for (unsigned Iteration = 0; Iteration < TripCount; Iteration++) {
+      DenseMap<Value *, Constant *> SimplifiedValues;
+      UnrolledInstAnalyzer Analyzer(Iteration, SimplifiedValues, *SE, L);
+      for (auto *BB : L->getBlocks())
+        for (Instruction &I : *BB)
+          Analyzer.visit(I);
+      SimplifiedValuesVector.push_back(SimplifiedValues);
+    }
+    return false;
+  }
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<DominatorTreeWrapperPass>();
+    AU.addRequired<LoopInfoWrapperPass>();
+    AU.addRequired<ScalarEvolutionWrapperPass>();
+    AU.setPreservesAll();
+  }
+  UnrollAnalyzerTest() : FunctionPass(ID) {
+    initializeUnrollAnalyzerTestPass(*PassRegistry::getPassRegistry());
+  }
+};
+}
+
+char UnrollAnalyzerTest::ID = 0;
+
+std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
+                                       const char *ModuleStr) {
+  SMDiagnostic Err;
+  return parseAssemblyString(ModuleStr, Err, Context);
+}
+
+TEST(UnrollAnalyzerTest, BasicSimplifications) {
+  const char *ModuleStr =
+      "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+      "define i64 @propagate_loop_phis() {\n"
+      "entry:\n"
+      "  br label %loop\n"
+      "loop:\n"
+      "  %iv = phi i64 [ 0, %entry ], [ %inc, %loop ]\n"
+      "  %x0 = phi i64 [ 0, %entry ], [ %x2, %loop ]\n"
+      "  %x1 = or i64 %x0, 1\n"
+      "  %x2 = or i64 %x1, 2\n"
+      "  %inc = add nuw nsw i64 %iv, 1\n"
+      "  %cond = icmp sge i64 %inc, 8\n"
+      "  br i1 %cond, label %loop.end, label %loop\n"
+      "loop.end:\n"
+      "  %x.lcssa = phi i64 [ %x2, %loop ]\n"
+      "  ret i64 %x.lcssa\n"
+      "}\n";
+  UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+  legacy::PassManager Passes;
+  Passes.add(P);
+  Passes.run(*M);
+
+  // Perform checks
+  Module::iterator MI = M->begin();
+  Function *F = &*MI++;
+  Function::iterator FI = F->begin();
+  FI++; // First basic block is entry - skip it.
+  BasicBlock *Header = &*FI++;
+
+  BasicBlock::iterator BBI = Header->begin();
+  std::advance(BBI, 4);
+  Instruction *Y1 = &*BBI++;
+  Instruction *Y2 = &*BBI++;
+  // Check simplification expected on the 1st iteration.
+  // Check that "%inc = add nuw nsw i64 %iv, 1" is simplified to 1
+  auto I1 = SimplifiedValuesVector[0].find(Y1);
+  EXPECT_TRUE(I1 != SimplifiedValuesVector[0].end());
+  EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 1U);
+
+  // Check that "%cond = icmp sge i64 %inc, 10" is simplified to false
+  auto I2 = SimplifiedValuesVector[0].find(Y2);
+  EXPECT_TRUE(I2 != SimplifiedValuesVector[0].end());
+  EXPECT_FALSE(cast<ConstantInt>((*I2).second)->getZExtValue());
+
+  // Check simplification expected on the last iteration.
+  // Check that "%inc = add nuw nsw i64 %iv, 1" is simplified to 8
+  I1 = SimplifiedValuesVector[TripCount - 1].find(Y1);
+  EXPECT_TRUE(I1 != SimplifiedValuesVector[TripCount - 1].end());
+  EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), TripCount);
+
+  // Check that "%cond = icmp sge i64 %inc, 10" is simplified to false
+  I2 = SimplifiedValuesVector[TripCount - 1].find(Y2);
+  EXPECT_TRUE(I2 != SimplifiedValuesVector[TripCount - 1].end());
+  EXPECT_TRUE(cast<ConstantInt>((*I2).second)->getZExtValue());
+}
+
+TEST(UnrollAnalyzerTest, OuterLoopSimplification) {
+  const char *ModuleStr =
+      "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+      "define void @foo() {\n"
+      "entry:\n"
+      "  br label %outer.loop\n"
+      "outer.loop:\n"
+      "  %iv.outer = phi i64 [ 0, %entry ], [ %iv.outer.next, %outer.loop.latch ]\n"
+      "  %iv.outer.next = add nuw nsw i64 %iv.outer, 1\n"
+      "  br label %inner.loop\n"
+      "inner.loop:\n"
+      "  %iv.inner = phi i64 [ 0, %outer.loop ], [ %iv.inner.next, %inner.loop ]\n"
+      "  %iv.inner.next = add nuw nsw i64 %iv.inner, 1\n"
+      "  %exitcond.inner = icmp eq i64 %iv.inner.next, 1000\n"
+      "  br i1 %exitcond.inner, label %outer.loop.latch, label %inner.loop\n"
+      "outer.loop.latch:\n"
+      "  %exitcond.outer = icmp eq i64 %iv.outer.next, 40\n"
+      "  br i1 %exitcond.outer, label %exit, label %outer.loop\n"
+      "exit:\n"
+      "  ret void\n"
+      "}\n";
+
+  UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+  legacy::PassManager Passes;
+  Passes.add(P);
+  Passes.run(*M);
+
+  Module::iterator MI = M->begin();
+  Function *F = &*MI++;
+  Function::iterator FI = F->begin();
+  FI++;
+  BasicBlock *Header = &*FI++;
+  BasicBlock *InnerBody = &*FI++;
+
+  BasicBlock::iterator BBI = Header->begin();
+  BBI++;
+  Instruction *Y1 = &*BBI;
+  BBI = InnerBody->begin();
+  BBI++;
+  Instruction *Y2 = &*BBI;
+  // Check that we can simplify the IV of the outer loop, but cannot simplify
+  // the IV of the inner loop if we only know the iteration number of the
+  // outer loop.
+  //
+  //  Y1 is %iv.outer.next, Y2 is %iv.inner.next
+  auto I1 = SimplifiedValuesVector[0].find(Y1);
+  EXPECT_TRUE(I1 != SimplifiedValuesVector[0].end());
+  auto I2 = SimplifiedValuesVector[0].find(Y2);
+  EXPECT_TRUE(I2 == SimplifiedValuesVector[0].end());
+}
+TEST(UnrollAnalyzerTest, CmpSimplifications) {
+  const char *ModuleStr =
+      "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+      "define void @branch_iv_trunc() {\n"
+      "entry:\n"
+      "  br label %for.body\n"
+      "for.body:\n"
+      "  %indvars.iv = phi i64 [ 0, %entry ], [ %tmp3, %for.body ]\n"
+      "  %tmp2 = trunc i64 %indvars.iv to i32\n"
+      "  %cmp3 = icmp eq i32 %tmp2, 5\n"
+      "  %tmp3 = add nuw nsw i64 %indvars.iv, 1\n"
+      "  %exitcond = icmp eq i64 %tmp3, 10\n"
+      "  br i1 %exitcond, label %for.end, label %for.body\n"
+      "for.end:\n"
+      "  ret void\n"
+      "}\n";
+  UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+  legacy::PassManager Passes;
+  Passes.add(P);
+  Passes.run(*M);
+
+  // Perform checks
+  Module::iterator MI = M->begin();
+  Function *F = &*MI++;
+  Function::iterator FI = F->begin();
+  FI++; // First basic block is entry - skip it.
+  BasicBlock *Header = &*FI++;
+
+  BasicBlock::iterator BBI = Header->begin();
+  BBI++;
+  Instruction *Y1 = &*BBI++;
+  Instruction *Y2 = &*BBI++;
+  // Check simplification expected on the 5th iteration.
+  // Check that "%tmp2 = trunc i64 %indvars.iv to i32" is simplified to 5
+  // and "%cmp3 = icmp eq i32 %tmp2, 5" is simplified to 1 (i.e. true).
+  auto I1 = SimplifiedValuesVector[5].find(Y1);
+  EXPECT_TRUE(I1 != SimplifiedValuesVector[5].end());
+  EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 5U);
+  auto I2 = SimplifiedValuesVector[5].find(Y2);
+  EXPECT_TRUE(I2 != SimplifiedValuesVector[5].end());
+  EXPECT_EQ(cast<ConstantInt>((*I2).second)->getZExtValue(), 1U);
+}
+
+TEST(UnrollAnalyzerTest, PtrCmpSimplifications) {
+  const char *ModuleStr =
+      "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+      "define void @ptr_cmp(i8 *%a) {\n"
+      "entry:\n"
+      "  %limit = getelementptr i8, i8* %a, i64 40\n"
+      "  %start.iv2 = getelementptr i8, i8* %a, i64 7\n"
+      "  br label %loop.body\n"
+      "loop.body:\n"
+      "  %iv.0 = phi i8* [ %a, %entry ], [ %iv.1, %loop.body ]\n"
+      "  %iv2.0 = phi i8* [ %start.iv2, %entry ], [ %iv2.1, %loop.body ]\n"
+      "  %cmp = icmp eq i8* %iv2.0, %iv.0\n"
+      "  %iv.1 = getelementptr inbounds i8, i8* %iv.0, i64 1\n"
+      "  %iv2.1 = getelementptr inbounds i8, i8* %iv2.0, i64 1\n"
+      "  %exitcond = icmp ne i8* %iv.1, %limit\n"
+      "  br i1 %exitcond, label %loop.body, label %loop.exit\n"
+      "loop.exit:\n"
+      "  ret void\n"
+      "}\n";
+  UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+  legacy::PassManager Passes;
+  Passes.add(P);
+  Passes.run(*M);
+
+  // Perform checks
+  Module::iterator MI = M->begin();
+  Function *F = &*MI++;
+  Function::iterator FI = F->begin();
+  FI++; // First basic block is entry - skip it.
+  BasicBlock *Header = &*FI;
+
+  BasicBlock::iterator BBI = Header->begin();
+  std::advance(BBI, 2);
+  Instruction *Y1 = &*BBI;
+  // Check simplification expected on the 5th iteration.
+  // Check that "%cmp = icmp eq i8* %iv2.0, %iv.0" is simplified to 0.
+  auto I1 = SimplifiedValuesVector[5].find(Y1);
+  EXPECT_TRUE(I1 != SimplifiedValuesVector[5].end());
+  EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 0U);
+}
+
+TEST(UnrollAnalyzerTest, CastSimplifications) {
+  const char *ModuleStr =
+      "target datalayout = \"e-m:o-i64:64-f80:128-n8:16:32:64-S128\"\n"
+      "@known_constant = internal unnamed_addr constant [10 x i32] [i32 0, i32 1, i32 0, i32 1, i32 0, i32 259, i32 0, i32 1, i32 0, i32 1], align 16\n"
+      "define void @const_load_cast() {\n"
+      "entry:\n"
+      "  br label %loop\n"
+      "\n"
+      "loop:\n"
+      "  %iv = phi i64 [ 0, %entry ], [ %inc, %loop ]\n"
+      "  %array_const_idx = getelementptr inbounds [10 x i32], [10 x i32]* @known_constant, i64 0, i64 %iv\n"
+      "  %const_array_element = load i32, i32* %array_const_idx, align 4\n"
+      "  %se = sext i32 %const_array_element to i64\n"
+      "  %ze = zext i32 %const_array_element to i64\n"
+      "  %tr = trunc i32 %const_array_element to i8\n"
+      "  %inc = add nuw nsw i64 %iv, 1\n"
+      "  %exitcond86.i = icmp eq i64 %inc, 10\n"
+      "  br i1 %exitcond86.i, label %loop.end, label %loop\n"
+      "\n"
+      "loop.end:\n"
+      "  ret void\n"
+      "}\n";
+
+  UnrollAnalyzerTest *P = new UnrollAnalyzerTest();
+  LLVMContext Context;
+  std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleStr);
+  legacy::PassManager Passes;
+  Passes.add(P);
+  Passes.run(*M);
+
+  // Perform checks
+  Module::iterator MI = M->begin();
+  Function *F = &*MI++;
+  Function::iterator FI = F->begin();
+  FI++; // First basic block is entry - skip it.
+  BasicBlock *Header = &*FI++;
+
+  BasicBlock::iterator BBI = Header->begin();
+  std::advance(BBI, 3);
+  Instruction *Y1 = &*BBI++;
+  Instruction *Y2 = &*BBI++;
+  Instruction *Y3 = &*BBI++;
+  // Check simplification expected on the 5th iteration.
+  // "%se = sext i32 %const_array_element to i64" should be simplified to 259,
+  // "%ze = zext i32 %const_array_element to i64" should be simplified to 259,
+  // "%tr = trunc i32 %const_array_element to i8" should be simplified to 3.
+  auto I1 = SimplifiedValuesVector[5].find(Y1);
+  EXPECT_TRUE(I1 != SimplifiedValuesVector[5].end());
+  EXPECT_EQ(cast<ConstantInt>((*I1).second)->getZExtValue(), 259U);
+  auto I2 = SimplifiedValuesVector[5].find(Y2);
+  EXPECT_TRUE(I2 != SimplifiedValuesVector[5].end());
+  EXPECT_EQ(cast<ConstantInt>((*I2).second)->getZExtValue(), 259U);
+  auto I3 = SimplifiedValuesVector[5].find(Y3);
+  EXPECT_TRUE(I3 != SimplifiedValuesVector[5].end());
+  EXPECT_EQ(cast<ConstantInt>((*I3).second)->getZExtValue(), 3U);
+}
+
+} // end namespace llvm
+
+INITIALIZE_PASS_BEGIN(UnrollAnalyzerTest, "unrollanalyzertestpass",
+                      "unrollanalyzertestpass", false, false)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
+INITIALIZE_PASS_END(UnrollAnalyzerTest, "unrollanalyzertestpass",
+                    "unrollanalyzertestpass", false, false)
diff --git a/src/llvm-project/llvm/unittests/Analysis/ValueLatticeTest.cpp b/src/llvm-project/llvm/unittests/Analysis/ValueLatticeTest.cpp
new file mode 100644
index 0000000..b0b3797
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/ValueLatticeTest.cpp
@@ -0,0 +1,187 @@
+//===- ValueLatticeTest.cpp - ValueLattice unit tests ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/ValueLattice.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "gtest/gtest.h"
+
+namespace llvm {
+namespace {
+
+// A test fixture that provides an LLVMContext and an empty Module for the
+// ValueLatticeElement tests below.
+class ValueLatticeTest : public testing::Test {
+protected:
+  LLVMContext Context;
+  Module M;
+
+  ValueLatticeTest() : M("", Context) {}
+};
+
+TEST_F(ValueLatticeTest, ValueLatticeGetters) {
+  auto I32Ty = IntegerType::get(Context, 32);
+  auto *C1 = ConstantInt::get(I32Ty, 1);
+
+  EXPECT_TRUE(ValueLatticeElement::get(C1).isConstantRange());
+  EXPECT_TRUE(
+      ValueLatticeElement::getRange({C1->getValue()}).isConstantRange());
+  EXPECT_TRUE(ValueLatticeElement::getOverdefined().isOverdefined());
+
+  auto FloatTy = Type::getFloatTy(Context);
+  auto *C2 = ConstantFP::get(FloatTy, 1.1);
+  EXPECT_TRUE(ValueLatticeElement::get(C2).isConstant());
+  EXPECT_TRUE(ValueLatticeElement::getNot(C2).isNotConstant());
+}
+
+TEST_F(ValueLatticeTest, MergeIn) {
+  auto I32Ty = IntegerType::get(Context, 32);
+  auto *C1 = ConstantInt::get(I32Ty, 1);
+
+  // Merge two lattice values holding the same integer constant.
+  auto LV1 = ValueLatticeElement::get(C1);
+  EXPECT_FALSE(LV1.mergeIn(ValueLatticeElement::get(C1), M.getDataLayout()));
+  EXPECT_TRUE(LV1.isConstantRange());
+  EXPECT_EQ(LV1.asConstantInteger().getValue().getLimitedValue(), 1U);
+
+  // Merge LV1 with a different integer constant; the result widens to the
+  // range [1, 100).
+  EXPECT_TRUE(LV1.mergeIn(ValueLatticeElement::get(ConstantInt::get(I32Ty, 99)),
+                          M.getDataLayout()));
+  EXPECT_TRUE(LV1.isConstantRange());
+  EXPECT_EQ(LV1.getConstantRange().getLower().getLimitedValue(), 1U);
+  EXPECT_EQ(LV1.getConstantRange().getUpper().getLimitedValue(), 100U);
+
+  // Merge constant range with same constant range.
+  EXPECT_FALSE(LV1.mergeIn(LV1, M.getDataLayout()));
+  EXPECT_TRUE(LV1.isConstantRange());
+  EXPECT_EQ(LV1.getConstantRange().getLower().getLimitedValue(), 1U);
+  EXPECT_EQ(LV1.getConstantRange().getUpper().getLimitedValue(), 100U);
+
+  // Merge LV1 into the still-undefined value LV2.
+  ValueLatticeElement LV2;
+  EXPECT_TRUE(LV2.mergeIn(LV1, M.getDataLayout()));
+  EXPECT_TRUE(LV1.isConstantRange());
+  EXPECT_EQ(LV1.getConstantRange().getLower().getLimitedValue(), 1U);
+  EXPECT_EQ(LV1.getConstantRange().getUpper().getLimitedValue(), 100U);
+  EXPECT_TRUE(LV2.isConstantRange());
+  EXPECT_EQ(LV2.getConstantRange().getLower().getLimitedValue(), 1U);
+  EXPECT_EQ(LV2.getConstantRange().getUpper().getLimitedValue(), 100U);
+
+  // Merge LV1 with overdefined.
+  EXPECT_TRUE(
+      LV1.mergeIn(ValueLatticeElement::getOverdefined(), M.getDataLayout()));
+  EXPECT_TRUE(LV1.isOverdefined());
+
+  // Merge overdefined with overdefined.
+  EXPECT_FALSE(
+      LV1.mergeIn(ValueLatticeElement::getOverdefined(), M.getDataLayout()));
+  EXPECT_TRUE(LV1.isOverdefined());
+}
+
+TEST_F(ValueLatticeTest, getCompareIntegers) {
+  auto *I32Ty = IntegerType::get(Context, 32);
+  auto *I1Ty = IntegerType::get(Context, 1);
+  auto *C1 = ConstantInt::get(I32Ty, 1);
+  auto LV1 = ValueLatticeElement::get(C1);
+
+  // Check getCompare for equal integer constants.
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_EQ, I1Ty, LV1)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_SGE, I1Ty, LV1)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_SLE, I1Ty, LV1)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_NE, I1Ty, LV1)->isZeroValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_SLT, I1Ty, LV1)->isZeroValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_SGT, I1Ty, LV1)->isZeroValue());
+
+  auto LV2 =
+      ValueLatticeElement::getRange({APInt(32, 10, true), APInt(32, 20, true)});
+  // Check getCompare with disjoint integer ranges.
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_SLT, I1Ty, LV2)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_SLE, I1Ty, LV2)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_NE, I1Ty, LV2)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_EQ, I1Ty, LV2)->isZeroValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_SGE, I1Ty, LV2)->isZeroValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::ICMP_SGT, I1Ty, LV2)->isZeroValue());
+
+  auto LV3 =
+      ValueLatticeElement::getRange({APInt(32, 15, true), APInt(32, 19, true)});
+  // Check getCompare where one integer range is a subset of the other.
+  EXPECT_EQ(LV2.getCompare(CmpInst::ICMP_SLT, I1Ty, LV3), nullptr);
+  EXPECT_EQ(LV2.getCompare(CmpInst::ICMP_SLE, I1Ty, LV3), nullptr);
+  EXPECT_EQ(LV2.getCompare(CmpInst::ICMP_NE, I1Ty, LV3), nullptr);
+  EXPECT_EQ(LV2.getCompare(CmpInst::ICMP_EQ, I1Ty, LV3), nullptr);
+  EXPECT_EQ(LV2.getCompare(CmpInst::ICMP_SGE, I1Ty, LV3), nullptr);
+  EXPECT_EQ(LV2.getCompare(CmpInst::ICMP_SGT, I1Ty, LV3), nullptr);
+
+  auto LV4 =
+      ValueLatticeElement::getRange({APInt(32, 15, true), APInt(32, 25, true)});
+  // Check getCompare with overlapping integer ranges.
+  EXPECT_EQ(LV3.getCompare(CmpInst::ICMP_SLT, I1Ty, LV4), nullptr);
+  EXPECT_EQ(LV3.getCompare(CmpInst::ICMP_SLE, I1Ty, LV4), nullptr);
+  EXPECT_EQ(LV3.getCompare(CmpInst::ICMP_NE, I1Ty, LV4), nullptr);
+  EXPECT_EQ(LV3.getCompare(CmpInst::ICMP_EQ, I1Ty, LV4), nullptr);
+  EXPECT_EQ(LV3.getCompare(CmpInst::ICMP_SGE, I1Ty, LV4), nullptr);
+  EXPECT_EQ(LV3.getCompare(CmpInst::ICMP_SGT, I1Ty, LV4), nullptr);
+}
+
+TEST_F(ValueLatticeTest, getCompareFloat) {
+  auto *FloatTy = Type::getFloatTy(Context);
+  auto *I1Ty = IntegerType::get(Context, 1);
+  auto *C1 = ConstantFP::get(FloatTy, 1.0);
+  auto LV1 = ValueLatticeElement::get(C1);
+  auto LV2 = ValueLatticeElement::get(C1);
+
+  // Check getCompare for equal floating point constants.
+  EXPECT_TRUE(LV1.getCompare(CmpInst::FCMP_OEQ, I1Ty, LV2)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::FCMP_OGE, I1Ty, LV2)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::FCMP_OLE, I1Ty, LV2)->isOneValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::FCMP_ONE, I1Ty, LV2)->isZeroValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::FCMP_OLT, I1Ty, LV2)->isZeroValue());
+  EXPECT_TRUE(LV1.getCompare(CmpInst::FCMP_OGT, I1Ty, LV2)->isZeroValue());
+
+  EXPECT_TRUE(
+      LV1.mergeIn(ValueLatticeElement::get(ConstantFP::get(FloatTy, 2.2)),
+                  M.getDataLayout()));
+  EXPECT_EQ(LV1.getCompare(CmpInst::FCMP_OEQ, I1Ty, LV2), nullptr);
+  EXPECT_EQ(LV1.getCompare(CmpInst::FCMP_OGE, I1Ty, LV2), nullptr);
+  EXPECT_EQ(LV1.getCompare(CmpInst::FCMP_OLE, I1Ty, LV2), nullptr);
+  EXPECT_EQ(LV1.getCompare(CmpInst::FCMP_ONE, I1Ty, LV2), nullptr);
+  EXPECT_EQ(LV1.getCompare(CmpInst::FCMP_OLT, I1Ty, LV2), nullptr);
+  EXPECT_EQ(LV1.getCompare(CmpInst::FCMP_OGT, I1Ty, LV2), nullptr);
+}
+
+TEST_F(ValueLatticeTest, getCompareUndef) {
+  auto *I32Ty = IntegerType::get(Context, 32);
+  auto *I1Ty = IntegerType::get(Context, 1);
+
+  auto LV1 = ValueLatticeElement::get(UndefValue::get(I32Ty));
+  auto LV2 =
+      ValueLatticeElement::getRange({APInt(32, 10, true), APInt(32, 20, true)});
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::ICMP_SLT, I1Ty, LV2)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::ICMP_SLE, I1Ty, LV2)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::ICMP_NE, I1Ty, LV2)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::ICMP_EQ, I1Ty, LV2)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::ICMP_SGE, I1Ty, LV2)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::ICMP_SGT, I1Ty, LV2)));
+
+  auto *FloatTy = Type::getFloatTy(Context);
+  auto LV3 = ValueLatticeElement::get(ConstantFP::get(FloatTy, 1.0));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::FCMP_OEQ, I1Ty, LV3)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::FCMP_OGE, I1Ty, LV3)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::FCMP_OLE, I1Ty, LV3)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::FCMP_ONE, I1Ty, LV3)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::FCMP_OLT, I1Ty, LV3)));
+  EXPECT_TRUE(isa<UndefValue>(LV1.getCompare(CmpInst::FCMP_OGT, I1Ty, LV3)));
+}
+
+} // end anonymous namespace
+} // end namespace llvm
diff --git a/src/llvm-project/llvm/unittests/Analysis/ValueTrackingTest.cpp b/src/llvm-project/llvm/unittests/Analysis/ValueTrackingTest.cpp
new file mode 100644
index 0000000..5b36d63
--- /dev/null
+++ b/src/llvm-project/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -0,0 +1,618 @@
+//===- ValueTrackingTest.cpp - ValueTracking tests ------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/AsmParser/Parser.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/KnownBits.h"
+#include "gtest/gtest.h"
+
+using namespace llvm;
+
+namespace {
+
+class ValueTrackingTest : public testing::Test {
+protected:
+  void parseAssembly(const char *Assembly) {
+    SMDiagnostic Error;
+    M = parseAssemblyString(Assembly, Error, Context);
+
+    std::string errMsg;
+    raw_string_ostream os(errMsg);
+    Error.print("", os);
+
+    // A failure here means that the test itself is buggy.
+    if (!M)
+      report_fatal_error(os.str());
+
+    Function *F = M->getFunction("test");
+    if (F == nullptr)
+      report_fatal_error("Test must have a function named @test");
+
+    A = nullptr;
+    for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
+      if (I->hasName()) {
+        if (I->getName() == "A")
+          A = &*I;
+      }
+    }
+    if (A == nullptr)
+      report_fatal_error("@test must have an instruction %A");
+  }
+
+  LLVMContext Context;
+  std::unique_ptr<Module> M;
+  Instruction *A;
+};
+
+class MatchSelectPatternTest : public ValueTrackingTest {
+protected:
+  void expectPattern(const SelectPatternResult &P) {
+    Value *LHS, *RHS;
+    Instruction::CastOps CastOp;
+    SelectPatternResult R = matchSelectPattern(A, LHS, RHS, &CastOp);
+    EXPECT_EQ(P.Flavor, R.Flavor);
+    EXPECT_EQ(P.NaNBehavior, R.NaNBehavior);
+    EXPECT_EQ(P.Ordered, R.Ordered);
+  }
+};
+
+class ComputeKnownBitsTest : public ValueTrackingTest {
+protected:
+  void expectKnownBits(uint64_t Zero, uint64_t One) {
+    auto Known = computeKnownBits(A, M->getDataLayout());
+    ASSERT_FALSE(Known.hasConflict());
+    EXPECT_EQ(Known.One.getZExtValue(), One);
+    EXPECT_EQ(Known.Zero.getZExtValue(), Zero);
+  }
+};
+
+} // end anonymous namespace
+
+TEST_F(MatchSelectPatternTest, SimpleFMin) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ult float %a, 5.0\n"
+      "  %A = select i1 %1, float %a, float 5.0\n"
+      "  ret float %A\n"
+      "}\n");
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false});
+}
+
+TEST_F(MatchSelectPatternTest, SimpleFMax) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float %a, 5.0\n"
+      "  %A = select i1 %1, float %a, float 5.0\n"
+      "  ret float %A\n"
+      "}\n");
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true});
+}
+
+TEST_F(MatchSelectPatternTest, SwappedFMax) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float 5.0, %a\n"
+      "  %A = select i1 %1, float %a, float 5.0\n"
+      "  ret float %A\n"
+      "}\n");
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false});
+}
+
+TEST_F(MatchSelectPatternTest, SwappedFMax2) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float %a, 5.0\n"
+      "  %A = select i1 %1, float 5.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false});
+}
+
+TEST_F(MatchSelectPatternTest, SwappedFMax3) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ult float %a, 5.0\n"
+      "  %A = select i1 %1, float 5.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true});
+}
+
+TEST_F(MatchSelectPatternTest, FastFMin) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp nnan olt float %a, 5.0\n"
+      "  %A = select i1 %1, float %a, float 5.0\n"
+      "  ret float %A\n"
+      "}\n");
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_ANY, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMinConstantZero) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ole float %a, 0.0\n"
+      "  %A = select i1 %1, float %a, float 0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // This shouldn't be matched, as %a could be -0.0.
+  expectPattern({SPF_UNKNOWN, SPNB_NA, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMinConstantZeroNsz) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp nsz ole float %a, 0.0\n"
+      "  %A = select i1 %1, float %a, float 0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // But this should be, because we've ignored signed zeroes.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero1) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float -0.0, %a\n"
+      "  %A = select i1 %1, float 0.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero2) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float %a, -0.0\n"
+      "  %A = select i1 %1, float 0.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero3) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float 0.0, %a\n"
+      "  %A = select i1 %1, float -0.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero4) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float %a, 0.0\n"
+      "  %A = select i1 %1, float -0.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero5) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float -0.0, %a\n"
+      "  %A = select i1 %1, float %a, float 0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero6) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float %a, -0.0\n"
+      "  %A = select i1 %1, float %a, float 0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero7) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float 0.0, %a\n"
+      "  %A = select i1 %1, float %a, float -0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZero8) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float %a, 0.0\n"
+      "  %A = select i1 %1, float %a, float -0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero1) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float -0.0, %a\n"
+      "  %A = select i1 %1, float 0.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero2) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float %a, -0.0\n"
+      "  %A = select i1 %1, float 0.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero3) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float 0.0, %a\n"
+      "  %A = select i1 %1, float -0.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero4) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float %a, 0.0\n"
+      "  %A = select i1 %1, float -0.0, float %a\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_NAN, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero5) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float -0.0, %a\n"
+      "  %A = select i1 %1, float %a, float 0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero6) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float %a, -0.0\n"
+      "  %A = select i1 %1, float %a, float 0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero7) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp olt float 0.0, %a\n"
+      "  %A = select i1 %1, float %a, float -0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZero8) {
+  parseAssembly(
+      "define float @test(float %a) {\n"
+      "  %1 = fcmp ogt float %a, 0.0\n"
+      "  %A = select i1 %1, float %a, float -0.0\n"
+      "  ret float %A\n"
+      "}\n");
+  // The sign of zero doesn't matter in fcmp.
+  expectPattern({SPF_FMAXNUM, SPNB_RETURNS_OTHER, true});
+}
+
+TEST_F(MatchSelectPatternTest, FMinMismatchConstantZeroVecUndef) {
+  parseAssembly(
+      "define <2 x float> @test(<2 x float> %a) {\n"
+      "  %1 = fcmp ogt <2 x float> %a, <float -0.0, float -0.0>\n"
+      "  %A = select <2 x i1> %1, <2 x float> <float undef, float 0.0>, <2 x float> %a\n"
+      "  ret <2 x float> %A\n"
+      "}\n");
+  // An undef in a vector constant cannot be back-propagated for this analysis.
+  expectPattern({SPF_UNKNOWN, SPNB_NA, false});
+}
+
+TEST_F(MatchSelectPatternTest, FMaxMismatchConstantZeroVecUndef) {
+  parseAssembly(
+      "define <2 x float> @test(<2 x float> %a) {\n"
+      "  %1 = fcmp ogt <2 x float> %a, zeroinitializer\n"
+      "  %A = select <2 x i1> %1, <2 x float> %a, <2 x float> <float -0.0, float undef>\n"
+      "  ret <2 x float> %A\n"
+      "}\n");
+  // An undef in a vector constant cannot be back-propagated for this analysis.
+  expectPattern({SPF_UNKNOWN, SPNB_NA, false});
+}
+
+TEST_F(MatchSelectPatternTest, VectorFMinimum) {
+  parseAssembly(
+      "define <4 x float> @test(<4 x float> %a) {\n"
+      "  %1 = fcmp ule <4 x float> %a, \n"
+      "    <float 5.0, float 5.0, float 5.0, float 5.0>\n"
+      "  %A = select <4 x i1> %1, <4 x float> %a,\n"
+      "     <4 x float> <float 5.0, float 5.0, float 5.0, float 5.0>\n"
+      "  ret <4 x float> %A\n"
+      "}\n");
+  // Check that pattern matching works on vectors where each lane has the same
+  // unordered pattern.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_NAN, false});
+}
+
+TEST_F(MatchSelectPatternTest, VectorFMinOtherOrdered) {
+  parseAssembly(
+      "define <4 x float> @test(<4 x float> %a) {\n"
+      "  %1 = fcmp ole <4 x float> %a, \n"
+      "    <float 5.0, float 5.0, float 5.0, float 5.0>\n"
+      "  %A = select <4 x i1> %1, <4 x float> %a,\n"
+      "     <4 x float> <float 5.0, float 5.0, float 5.0, float 5.0>\n"
+      "  ret <4 x float> %A\n"
+      "}\n");
+  // Check that pattern matching works on vectors where each lane has the same
+  // ordered pattern.
+  expectPattern({SPF_FMINNUM, SPNB_RETURNS_OTHER, true});
+}
+
+TEST_F(MatchSelectPatternTest, VectorNotFMinimum) {
+  parseAssembly(
+      "define <4 x float> @test(<4 x float> %a) {\n"
+      "  %1 = fcmp ule <4 x float> %a, \n"
+      "    <float 5.0, float 0x7ff8000000000000, float 5.0, float 5.0>\n"
+      "  %A = select <4 x i1> %1, <4 x float> %a,\n"
+      "     <4 x float> <float 5.0, float 0x7ff8000000000000, float 5.0, float "
+      "5.0>\n"
+      "  ret <4 x float> %A\n"
+      "}\n");
+  // The lane that contains a NaN (0x7ff80...) behaves like a
+  // non-NaN-propagating min and the other lanes behave like a NaN-propagating
+  // min, so check that neither is returned.
+  expectPattern({SPF_UNKNOWN, SPNB_NA, false});
+}
+
+TEST_F(MatchSelectPatternTest, VectorNotFMinZero) {
+  parseAssembly(
+      "define <4 x float> @test(<4 x float> %a) {\n"
+      "  %1 = fcmp ule <4 x float> %a, \n"
+      "    <float 5.0, float -0.0, float 5.0, float 5.0>\n"
+      "  %A = select <4 x i1> %1, <4 x float> %a,\n"
+      "     <4 x float> <float 5.0, float 0.0, float 5.0, float 5.0>\n"
+      "  ret <4 x float> %A\n"
+      "}\n");
+  // Always selects the second lane of %a if it is positive or negative zero, so
+  // this is stricter than a min.
+  expectPattern({SPF_UNKNOWN, SPNB_NA, false});
+}
+
+TEST_F(MatchSelectPatternTest, DoubleCastU) {
+  parseAssembly(
+      "define i32 @test(i8 %a, i8 %b) {\n"
+      "  %1 = icmp ult i8 %a, %b\n"
+      "  %2 = zext i8 %a to i32\n"
+      "  %3 = zext i8 %b to i32\n"
+      "  %A = select i1 %1, i32 %2, i32 %3\n"
+      "  ret i32 %A\n"
+      "}\n");
+  // We should be able to look through the situation where we cast both operands
+  // to the select.
+  expectPattern({SPF_UMIN, SPNB_NA, false});
+}
+
+TEST_F(MatchSelectPatternTest, DoubleCastS) {
+  parseAssembly(
+      "define i32 @test(i8 %a, i8 %b) {\n"
+      "  %1 = icmp slt i8 %a, %b\n"
+      "  %2 = sext i8 %a to i32\n"
+      "  %3 = sext i8 %b to i32\n"
+      "  %A = select i1 %1, i32 %2, i32 %3\n"
+      "  ret i32 %A\n"
+      "}\n");
+  // We should be able to look through the situation where we cast both operands
+  // to the select.
+  expectPattern({SPF_SMIN, SPNB_NA, false});
+}
+
+TEST_F(MatchSelectPatternTest, DoubleCastBad) {
+  parseAssembly(
+      "define i32 @test(i8 %a, i8 %b) {\n"
+      "  %1 = icmp ult i8 %a, %b\n"
+      "  %2 = zext i8 %a to i32\n"
+      "  %3 = sext i8 %b to i32\n"
+      "  %A = select i1 %1, i32 %2, i32 %3\n"
+      "  ret i32 %A\n"
+      "}\n");
+  // The cast types here aren't the same, so we cannot match an UMIN.
+  expectPattern({SPF_UNKNOWN, SPNB_NA, false});
+}
+
+TEST(ValueTracking, GuaranteedToTransferExecutionToSuccessor) {
+  StringRef Assembly =
+      "declare void @nounwind_readonly(i32*) nounwind readonly "
+      "declare void @nounwind_argmemonly(i32*) nounwind argmemonly "
+      "declare void @throws_but_readonly(i32*) readonly "
+      "declare void @throws_but_argmemonly(i32*) argmemonly "
+      " "
+      "declare void @unknown(i32*) "
+      " "
+      "define void @f(i32* %p) { "
+      "  call void @nounwind_readonly(i32* %p) "
+      "  call void @nounwind_argmemonly(i32* %p) "
+      "  call void @throws_but_readonly(i32* %p) "
+      "  call void @throws_but_argmemonly(i32* %p) "
+      "  call void @unknown(i32* %p) nounwind readonly "
+      "  call void @unknown(i32* %p) nounwind argmemonly "
+      "  call void @unknown(i32* %p) readonly "
+      "  call void @unknown(i32* %p) argmemonly "
+      "  ret void "
+      "} ";
+
+  LLVMContext Context;
+  SMDiagnostic Error;
+  auto M = parseAssemblyString(Assembly, Error, Context);
+  assert(M && "Bad assembly?");
+
+  auto *F = M->getFunction("f");
+  assert(F && "Bad assembly?");
+
+  auto &BB = F->getEntryBlock();
+  bool ExpectedAnswers[] = {
+      true,  // call void @nounwind_readonly(i32* %p)
+      true,  // call void @nounwind_argmemonly(i32* %p)
+      false, // call void @throws_but_readonly(i32* %p)
+      false, // call void @throws_but_argmemonly(i32* %p)
+      true,  // call void @unknown(i32* %p) nounwind readonly
+      true,  // call void @unknown(i32* %p) nounwind argmemonly
+      false, // call void @unknown(i32* %p) readonly
+      false, // call void @unknown(i32* %p) argmemonly
+      false, // ret void
+  };
+
+  int Index = 0;
+  for (auto &I : BB) {
+    EXPECT_EQ(isGuaranteedToTransferExecutionToSuccessor(&I),
+              ExpectedAnswers[Index])
+        << "Incorrect answer at instruction " << Index << " = " << I;
+    Index++;
+  }
+}
+
+TEST_F(ValueTrackingTest, ComputeNumSignBits_PR32045) {
+  parseAssembly(
+      "define i32 @test(i32 %a) {\n"
+      "  %A = ashr i32 %a, -1\n"
+      "  ret i32 %A\n"
+      "}\n");
+  EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u);
+}
+
+// No guarantees for canonical IR in this analysis, so this just bails out. 
+TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle) {
+  parseAssembly(
+      "define <2 x i32> @test() {\n"
+      "  %A = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 0>\n"
+      "  ret <2 x i32> %A\n"
+      "}\n");
+  EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u);
+}
+
+// No guarantees for canonical IR in this analysis, so a shuffle element that
+// references an undef value means this can't return any extra information. 
+TEST_F(ValueTrackingTest, ComputeNumSignBits_Shuffle2) {
+  parseAssembly(
+      "define <2 x i32> @test(<2 x i1> %x) {\n"
+      "  %sext = sext <2 x i1> %x to <2 x i32>\n"
+      "  %A = shufflevector <2 x i32> %sext, <2 x i32> undef, <2 x i32> <i32 0, i32 2>\n"
+      "  ret <2 x i32> %A\n"
+      "}\n");
+  EXPECT_EQ(ComputeNumSignBits(A, M->getDataLayout()), 1u);
+}
+
+TEST_F(ComputeKnownBitsTest, ComputeKnownBits) {
+  parseAssembly(
+      "define i32 @test(i32 %a, i32 %b) {\n"
+      "  %ash = mul i32 %a, 8\n"
+      "  %aad = add i32 %ash, 7\n"
+      "  %aan = and i32 %aad, 4095\n"
+      "  %bsh = shl i32 %b, 4\n"
+      "  %bad = or i32 %bsh, 6\n"
+      "  %ban = and i32 %bad, 4095\n"
+      "  %A = mul i32 %aan, %ban\n"
+      "  ret i32 %A\n"
+      "}\n");
+  expectKnownBits(/*zero*/ 4278190085u, /*one*/ 10u);
+}
+
+TEST_F(ComputeKnownBitsTest, ComputeKnownMulBits) {
+  parseAssembly(
+      "define i32 @test(i32 %a, i32 %b) {\n"
+      "  %aa = shl i32 %a, 5\n"
+      "  %bb = shl i32 %b, 5\n"
+      "  %aaa = or i32 %aa, 24\n"
+      "  %bbb = or i32 %bb, 28\n"
+      "  %A = mul i32 %aaa, %bbb\n"
+      "  ret i32 %A\n"
+      "}\n");
+  expectKnownBits(/*zero*/ 95u, /*one*/ 32u);
+}
+
+TEST_F(ComputeKnownBitsTest, ComputeKnownFshl) {
+  // fshl(....1111....0000, 00..1111........, 6)
+  // = 11....000000..11
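+  // i.e. bits 15-14 and 1-0 are known one (49155) and bits 9-4 are known
+  // zero (1008).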
+  parseAssembly(
+      "define i16 @test(i16 %a, i16 %b) {\n"
+      "  %aa = shl i16 %a, 4\n"
+      "  %bb = lshr i16 %b, 2\n"
+      "  %aaa = or i16 %aa, 3840\n"
+      "  %bbb = or i16 %bb, 3840\n"
+      "  %A = call i16 @llvm.fshl.i16(i16 %aaa, i16 %bbb, i16 6)\n"
+      "  ret i16 %A\n"
+      "}\n"
+      "declare i16 @llvm.fshl.i16(i16, i16, i16)\n");
+  expectKnownBits(/*zero*/ 1008u, /*one*/ 49155u);
+}
+
+TEST_F(ComputeKnownBitsTest, ComputeKnownFshr) {
+  // fshr(....1111....0000, 00..1111........, 26)
+  // = 11....000000..11
+  parseAssembly(
+      "define i16 @test(i16 %a, i16 %b) {\n"
+      "  %aa = shl i16 %a, 4\n"
+      "  %bb = lshr i16 %b, 2\n"
+      "  %aaa = or i16 %aa, 3840\n"
+      "  %bbb = or i16 %bb, 3840\n"
+      "  %A = call i16 @llvm.fshr.i16(i16 %aaa, i16 %bbb, i16 26)\n"
+      "  ret i16 %A\n"
+      "}\n"
+      "declare i16 @llvm.fshr.i16(i16, i16, i16)\n");
+  expectKnownBits(/*zero*/ 1008u, /*one*/ 49155u);
+}
+
+TEST_F(ComputeKnownBitsTest, ComputeKnownFshlZero) {
+  // fshl(....1111....0000, 00..1111........, 0)
+  // = ....1111....0000
+  parseAssembly(
+      "define i16 @test(i16 %a, i16 %b) {\n"
+      "  %aa = shl i16 %a, 4\n"
+      "  %bb = lshr i16 %b, 2\n"
+      "  %aaa = or i16 %aa, 3840\n"
+      "  %bbb = or i16 %bb, 3840\n"
+      "  %A = call i16 @llvm.fshl.i16(i16 %aaa, i16 %bbb, i16 0)\n"
+      "  ret i16 %A\n"
+      "}\n"
+      "declare i16 @llvm.fshl.i16(i16, i16, i16)\n");
+  expectKnownBits(/*zero*/ 15u, /*one*/ 3840u);
+}