Importing rustc-1.56.0

This imports the LLVM 13 sources (src/llvm-project) bundled with the
rustc-1.56.0 release.

Change-Id: I98941481270706fa55f8fb2cb91686ae3bd30f38
diff --git a/src/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp
index fae7a843..e7445e2 100644
--- a/src/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -63,9 +63,18 @@
 STATISTIC(NumMayAlias,  "Number of MayAlias results");
 STATISTIC(NumMustAlias, "Number of MustAlias results");
 
+namespace llvm {
 /// Allow disabling BasicAA from the AA results. This is particularly useful
 /// when testing to isolate a single AA implementation.
 cl::opt<bool> DisableBasicAA("disable-basic-aa", cl::Hidden, cl::init(false));
+} // namespace llvm
+
+#ifndef NDEBUG
+/// Print a trace of alias analysis queries and their results.
+static cl::opt<bool> EnableAATrace("aa-trace", cl::Hidden, cl::init(false));
+#else
+static const bool EnableAATrace = false;
+#endif
 
 AAResults::AAResults(AAResults &&Arg)
     : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {
@@ -116,20 +125,34 @@
 
 AliasResult AAResults::alias(const MemoryLocation &LocA,
                              const MemoryLocation &LocB, AAQueryInfo &AAQI) {
-  AliasResult Result = MayAlias;
+  AliasResult Result = AliasResult::MayAlias;
 
-  Depth++;
+  if (EnableAATrace) {
+    for (unsigned I = 0; I < AAQI.Depth; ++I)
+      dbgs() << "  ";
+    dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", "
+           << *LocB.Ptr << " @ " << LocB.Size << "\n";
+  }
+
+  AAQI.Depth++;
   for (const auto &AA : AAs) {
     Result = AA->alias(LocA, LocB, AAQI);
-    if (Result != MayAlias)
+    if (Result != AliasResult::MayAlias)
       break;
   }
-  Depth--;
+  AAQI.Depth--;
 
-  if (Depth == 0) {
-    if (Result == NoAlias)
+  if (EnableAATrace) {
+    for (unsigned I = 0; I < AAQI.Depth; ++I)
+      dbgs() << "  ";
+    dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", "
+           << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n";
+  }
+
+  if (AAQI.Depth == 0) {
+    if (Result == AliasResult::NoAlias)
       ++NumNoAlias;
-    else if (Result == MustAlias)
+    else if (Result == AliasResult::MustAlias)
       ++NumMustAlias;
     else
       ++NumMayAlias;
@@ -235,12 +258,12 @@
         MemoryLocation ArgLoc =
             MemoryLocation::getForArgument(Call, ArgIdx, TLI);
         AliasResult ArgAlias = alias(ArgLoc, Loc, AAQI);
-        if (ArgAlias != NoAlias) {
+        if (ArgAlias != AliasResult::NoAlias) {
           ModRefInfo ArgMask = getArgModRefInfo(Call, ArgIdx);
           AllArgsMask = unionModRef(AllArgsMask, ArgMask);
         }
         // Conservatively clear IsMustAlias unless only MustAlias is found.
-        IsMustAlias &= (ArgAlias == MustAlias);
+        IsMustAlias &= (ArgAlias == AliasResult::MustAlias);
       }
     }
     // Return NoModRef if no alias found with any argument.
@@ -428,17 +451,19 @@
 
 raw_ostream &llvm::operator<<(raw_ostream &OS, AliasResult AR) {
   switch (AR) {
-  case NoAlias:
+  case AliasResult::NoAlias:
     OS << "NoAlias";
     break;
-  case MustAlias:
+  case AliasResult::MustAlias:
     OS << "MustAlias";
     break;
-  case MayAlias:
+  case AliasResult::MayAlias:
     OS << "MayAlias";
     break;
-  case PartialAlias:
+  case AliasResult::PartialAlias:
     OS << "PartialAlias";
+    if (AR.hasOffset())
+      OS << " (off " << AR.getOffset() << ")";
     break;
   }
   return OS;
@@ -464,9 +489,9 @@
   // or write the specified memory.
   if (Loc.Ptr) {
     AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI);
-    if (AR == NoAlias)
+    if (AR == AliasResult::NoAlias)
       return ModRefInfo::NoModRef;
-    if (AR == MustAlias)
+    if (AR == AliasResult::MustAlias)
       return ModRefInfo::MustRef;
   }
   // Otherwise, a load just reads.
@@ -489,7 +514,7 @@
     AliasResult AR = alias(MemoryLocation::get(S), Loc, AAQI);
     // If the store address cannot alias the pointer in question, then the
     // specified memory cannot be modified by the store.
-    if (AR == NoAlias)
+    if (AR == AliasResult::NoAlias)
       return ModRefInfo::NoModRef;
 
     // If the pointer is a pointer to constant memory, then it could not have
@@ -498,7 +523,7 @@
       return ModRefInfo::NoModRef;
 
     // If the store address aliases the pointer as must alias, set Must.
-    if (AR == MustAlias)
+    if (AR == AliasResult::MustAlias)
       return ModRefInfo::MustMod;
   }
 
@@ -534,7 +559,7 @@
     AliasResult AR = alias(MemoryLocation::get(V), Loc, AAQI);
     // If the va_arg address cannot alias the pointer in question, then the
     // specified memory cannot be accessed by the va_arg.
-    if (AR == NoAlias)
+    if (AR == AliasResult::NoAlias)
       return ModRefInfo::NoModRef;
 
     // If the pointer is a pointer to constant memory, then it could not have
@@ -543,7 +568,7 @@
       return ModRefInfo::NoModRef;
 
     // If the va_arg aliases the pointer as must alias, set Must.
-    if (AR == MustAlias)
+    if (AR == AliasResult::MustAlias)
       return ModRefInfo::MustModRef;
   }
 
@@ -608,11 +633,11 @@
     AliasResult AR = alias(MemoryLocation::get(CX), Loc, AAQI);
     // If the cmpxchg address does not alias the location, it does not access
     // it.
-    if (AR == NoAlias)
+    if (AR == AliasResult::NoAlias)
       return ModRefInfo::NoModRef;
 
     // If the cmpxchg address aliases the pointer as must alias, set Must.
-    if (AR == MustAlias)
+    if (AR == AliasResult::MustAlias)
       return ModRefInfo::MustModRef;
   }
 
@@ -636,11 +661,11 @@
     AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI);
     // If the atomicrmw address does not alias the location, it does not access
     // it.
-    if (AR == NoAlias)
+    if (AR == AliasResult::NoAlias)
       return ModRefInfo::NoModRef;
 
     // If the atomicrmw address aliases the pointer as must alias, set Must.
-    if (AR == MustAlias)
+    if (AR == AliasResult::MustAlias)
       return ModRefInfo::MustModRef;
   }
 
@@ -693,13 +718,13 @@
 /// with a smarter AA in place, this test is just wasting compile time.
 ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                          const MemoryLocation &MemLoc,
-                                         DominatorTree *DT) {
+                                         DominatorTree *DT,
+                                         AAQueryInfo &AAQI) {
   if (!DT)
     return ModRefInfo::ModRef;
 
   const Value *Object = getUnderlyingObject(MemLoc.Ptr);
-  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
-      isa<Constant>(Object))
+  if (!isIdentifiedFunctionLocal(Object))
     return ModRefInfo::ModRef;
 
   const auto *Call = dyn_cast<CallBase>(I);
@@ -725,14 +750,16 @@
          !Call->isByValArgument(ArgNo)))
       continue;
 
-    AliasResult AR = alias(*CI, Object);
+    AliasResult AR = alias(
+        MemoryLocation::getBeforeOrAfter(*CI),
+        MemoryLocation::getBeforeOrAfter(Object), AAQI);
     // If this is a no-capture pointer argument, see if we can tell that it
     // is impossible to alias the pointer we're checking.  If not, we have to
     // assume that the call could touch the pointer, even though it doesn't
     // escape.
-    if (AR != MustAlias)
+    if (AR != AliasResult::MustAlias)
       IsMustAlias = false;
-    if (AR == NoAlias)
+    if (AR == AliasResult::NoAlias)
       continue;
     if (Call->doesNotAccessMemory(ArgNo))
       continue;
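
The hunks above port AliasAnalysis.cpp to the scoped AliasResult introduced in LLVM 13: the bare NoAlias/MayAlias/PartialAlias/MustAlias enumerators become AliasResult::NoAlias and friends, PartialAlias can carry a byte offset (printed as " (off N)" by operator<<), and an NDEBUG-gated -aa-trace option dumps each query. A self-contained sketch of that result shape, assuming a plain int32_t offset field and modeling only the members the diff exercises (Kind values, hasOffset, getOffset, swap), not LLVM's exact layout:

    #include <cstdint>
    #include <iostream>

    class AliasResult {
    public:
      enum Kind : uint8_t { NoAlias, MayAlias, PartialAlias, MustAlias };

    private:
      int32_t Offset = 0;   // meaningful only for PartialAlias
      Kind K;
      bool HasOffset = false;

    public:
      AliasResult(Kind K) : K(K) {}
      explicit AliasResult(int32_t Off)
          : Offset(Off), K(PartialAlias), HasOffset(true) {}
      operator Kind() const { return K; }
      bool hasOffset() const { return HasOffset; }
      int32_t getOffset() const { return Offset; }
      void swap() { Offset = -Offset; } // operands reordered; flip the sign
    };

    int main() {
      AliasResult AR(12); // PartialAlias: second location starts 12 bytes later
      if (AR == AliasResult::PartialAlias && AR.hasOffset())
        std::cout << "PartialAlias (off " << AR.getOffset() << ")\n";
    }

Scoping the enumerators is what forces the mechanical AliasResult::MustAlias renames that dominate the rest of this patch.
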
diff --git a/src/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp b/src/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
index bbfa82b..0c097b2 100644
--- a/src/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/AliasAnalysisEvaluator.cpp
@@ -51,8 +51,11 @@
       V2->printAsOperand(os2, true, M);
     }
 
-    if (o2 < o1)
+    if (o2 < o1) {
       std::swap(o1, o2);
+      // Change offset sign for the local AR, for printing only.
+      AR.swap();
+    }
     errs() << "  " << AR << ":\t" << o1 << ", " << o2 << "\n";
   }
 }
@@ -105,14 +108,13 @@
     if (I.getType()->isPointerTy())    // Add all pointer arguments.
       Pointers.insert(&I);
 
-  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
-    if (I->getType()->isPointerTy()) // Add all pointer instructions.
-      Pointers.insert(&*I);
-    if (EvalAAMD && isa<LoadInst>(&*I))
-      Loads.insert(&*I);
-    if (EvalAAMD && isa<StoreInst>(&*I))
-      Stores.insert(&*I);
-    Instruction &Inst = *I;
+  for (Instruction &Inst : instructions(F)) {
+    if (Inst.getType()->isPointerTy()) // Add all pointer instructions.
+      Pointers.insert(&Inst);
+    if (EvalAAMD && isa<LoadInst>(&Inst))
+      Loads.insert(&Inst);
+    if (EvalAAMD && isa<StoreInst>(&Inst))
+      Stores.insert(&Inst);
     if (auto *Call = dyn_cast<CallBase>(&Inst)) {
       Value *Callee = Call->getCalledOperand();
       // Skip actual functions for direct function calls.
@@ -125,10 +127,9 @@
       Calls.insert(Call);
     } else {
       // Consider all operands.
-      for (Instruction::op_iterator OI = Inst.op_begin(), OE = Inst.op_end();
-           OI != OE; ++OI)
-        if (isInterestingPointer(*OI))
-          Pointers.insert(*OI);
+      for (Use &Op : Inst.operands())
+        if (isInterestingPointer(Op))
+          Pointers.insert(Op);
     }
   }
 
@@ -153,19 +154,19 @@
 
       AliasResult AR = AA.alias(*I1, I1Size, *I2, I2Size);
       switch (AR) {
-      case NoAlias:
+      case AliasResult::NoAlias:
         PrintResults(AR, PrintNoAlias, *I1, *I2, F.getParent());
         ++NoAliasCount;
         break;
-      case MayAlias:
+      case AliasResult::MayAlias:
         PrintResults(AR, PrintMayAlias, *I1, *I2, F.getParent());
         ++MayAliasCount;
         break;
-      case PartialAlias:
+      case AliasResult::PartialAlias:
         PrintResults(AR, PrintPartialAlias, *I1, *I2, F.getParent());
         ++PartialAliasCount;
         break;
-      case MustAlias:
+      case AliasResult::MustAlias:
         PrintResults(AR, PrintMustAlias, *I1, *I2, F.getParent());
         ++MustAliasCount;
         break;
@@ -180,19 +181,19 @@
         AliasResult AR = AA.alias(MemoryLocation::get(cast<LoadInst>(Load)),
                                   MemoryLocation::get(cast<StoreInst>(Store)));
         switch (AR) {
-        case NoAlias:
+        case AliasResult::NoAlias:
           PrintLoadStoreResults(AR, PrintNoAlias, Load, Store, F.getParent());
           ++NoAliasCount;
           break;
-        case MayAlias:
+        case AliasResult::MayAlias:
           PrintLoadStoreResults(AR, PrintMayAlias, Load, Store, F.getParent());
           ++MayAliasCount;
           break;
-        case PartialAlias:
+        case AliasResult::PartialAlias:
           PrintLoadStoreResults(AR, PrintPartialAlias, Load, Store, F.getParent());
           ++PartialAliasCount;
           break;
-        case MustAlias:
+        case AliasResult::MustAlias:
           PrintLoadStoreResults(AR, PrintMustAlias, Load, Store, F.getParent());
           ++MustAliasCount;
           break;
@@ -207,19 +208,19 @@
         AliasResult AR = AA.alias(MemoryLocation::get(cast<StoreInst>(*I1)),
                                   MemoryLocation::get(cast<StoreInst>(*I2)));
         switch (AR) {
-        case NoAlias:
+        case AliasResult::NoAlias:
           PrintLoadStoreResults(AR, PrintNoAlias, *I1, *I2, F.getParent());
           ++NoAliasCount;
           break;
-        case MayAlias:
+        case AliasResult::MayAlias:
           PrintLoadStoreResults(AR, PrintMayAlias, *I1, *I2, F.getParent());
           ++MayAliasCount;
           break;
-        case PartialAlias:
+        case AliasResult::PartialAlias:
           PrintLoadStoreResults(AR, PrintPartialAlias, *I1, *I2, F.getParent());
           ++PartialAliasCount;
           break;
-        case MustAlias:
+        case AliasResult::MustAlias:
           PrintLoadStoreResults(AR, PrintMustAlias, *I1, *I2, F.getParent());
           ++MustAliasCount;
           break;
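
The evaluator changes above are mostly mechanical, but the new AR.swap() call deserves a note: a PartialAlias offset is relative to operand order, so printing the two operands in sorted order requires flipping its sign. A plain-integer sketch of that bookkeeping (names and values are illustrative):

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <utility>

    int main() {
      std::string o1 = "%q", o2 = "%p";
      int32_t Off = 12;        // %q starts 12 bytes past %p
      if (o2 < o1) {
        std::swap(o1, o2);     // print in sorted order
        Off = -Off;            // mirrors AR.swap(): offset flips sign
      }
      std::cout << "PartialAlias (off " << Off << "):\t"
                << o1 << ", " << o2 << "\n";
    }
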
diff --git a/src/llvm-project/llvm/lib/Analysis/AliasSetTracker.cpp b/src/llvm-project/llvm/lib/Analysis/AliasSetTracker.cpp
index 347bcd0..5dc6c77 100644
--- a/src/llvm-project/llvm/lib/Analysis/AliasSetTracker.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/AliasSetTracker.cpp
@@ -63,9 +63,9 @@
     PointerRec *R = AS.getSomePointer();
 
     // If the pointers are not a must-alias pair, this set becomes a may alias.
-    if (AA.alias(MemoryLocation(L->getValue(), L->getSize(), L->getAAInfo()),
-                 MemoryLocation(R->getValue(), R->getSize(), R->getAAInfo())) !=
-        MustAlias)
+    if (!AA.isMustAlias(
+            MemoryLocation(L->getValue(), L->getSize(), L->getAAInfo()),
+            MemoryLocation(R->getValue(), R->getSize(), R->getAAInfo())))
       Alias = SetMayAlias;
   }
 
@@ -141,11 +141,11 @@
         AliasResult Result = AA.alias(
             MemoryLocation(P->getValue(), P->getSize(), P->getAAInfo()),
             MemoryLocation(Entry.getValue(), Size, AAInfo));
-        if (Result != MustAlias) {
+        if (Result != AliasResult::MustAlias) {
           Alias = SetMayAlias;
           AST.TotalMayAliasSetSize += size();
         }
-        assert(Result != NoAlias && "Cannot be part of must set!");
+        assert(Result != AliasResult::NoAlias && "Cannot be part of must set!");
       } else if (!SkipSizeUpdate)
         P->updateSizeAndAAInfo(Size, AAInfo);
     }
@@ -195,7 +195,7 @@
                                      const AAMDNodes &AAInfo,
                                      AliasAnalysis &AA) const {
   if (AliasAny)
-    return MayAlias;
+    return AliasResult::MayAlias;
 
   if (Alias == SetMustAlias) {
     assert(UnknownInsts.empty() && "Illegal must alias set!");
@@ -211,11 +211,13 @@
 
   // If this is a may-alias set, we have to check all of the pointers in the set
   // to be sure it doesn't alias the set...
-  for (iterator I = begin(), E = end(); I != E; ++I)
-    if (AliasResult AR = AA.alias(
-            MemoryLocation(Ptr, Size, AAInfo),
-            MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo())))
+  for (iterator I = begin(), E = end(); I != E; ++I) {
+    AliasResult AR =
+        AA.alias(MemoryLocation(Ptr, Size, AAInfo),
+                 MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo()));
+    if (AR != AliasResult::NoAlias)
       return AR;
+  }
 
   // Check the unknown instructions...
   if (!UnknownInsts.empty()) {
@@ -223,10 +225,10 @@
       if (auto *Inst = getUnknownInst(i))
         if (isModOrRefSet(
                 AA.getModRefInfo(Inst, MemoryLocation(Ptr, Size, AAInfo))))
-          return MayAlias;
+          return AliasResult::MayAlias;
   }
 
-  return NoAlias;
+  return AliasResult::NoAlias;
 }
 
 bool AliasSet::aliasesUnknownInst(const Instruction *Inst,
@@ -283,9 +285,8 @@
 
 void AliasSetTracker::clear() {
   // Delete all the PointerRec entries.
-  for (PointerMapType::iterator I = PointerMap.begin(), E = PointerMap.end();
-       I != E; ++I)
-    I->second->eraseFromList();
+  for (auto &I : PointerMap)
+    I.second->eraseFromList();
 
   PointerMap.clear();
 
@@ -302,44 +303,41 @@
                                                     const AAMDNodes &AAInfo,
                                                     bool &MustAliasAll) {
   AliasSet *FoundSet = nullptr;
-  AliasResult AllAR = MustAlias;
-  for (iterator I = begin(), E = end(); I != E;) {
-    iterator Cur = I++;
-    if (Cur->Forward)
+  MustAliasAll = true;
+  for (AliasSet &AS : llvm::make_early_inc_range(*this)) {
+    if (AS.Forward)
       continue;
 
-    AliasResult AR = Cur->aliasesPointer(Ptr, Size, AAInfo, AA);
-    if (AR == NoAlias)
+    AliasResult AR = AS.aliasesPointer(Ptr, Size, AAInfo, AA);
+    if (AR == AliasResult::NoAlias)
       continue;
 
-    AllAR =
-        AliasResult(AllAR & AR); // Possible downgrade to May/Partial, even No
+    if (AR != AliasResult::MustAlias)
+      MustAliasAll = false;
 
     if (!FoundSet) {
       // If this is the first alias set ptr can go into, remember it.
-      FoundSet = &*Cur;
+      FoundSet = &AS;
     } else {
       // Otherwise, we must merge the sets.
-      FoundSet->mergeSetIn(*Cur, *this);
+      FoundSet->mergeSetIn(AS, *this);
     }
   }
 
-  MustAliasAll = (AllAR == MustAlias);
   return FoundSet;
 }
 
 AliasSet *AliasSetTracker::findAliasSetForUnknownInst(Instruction *Inst) {
   AliasSet *FoundSet = nullptr;
-  for (iterator I = begin(), E = end(); I != E;) {
-    iterator Cur = I++;
-    if (Cur->Forward || !Cur->aliasesUnknownInst(Inst, AA))
+  for (AliasSet &AS : llvm::make_early_inc_range(*this)) {
+    if (AS.Forward || !AS.aliasesUnknownInst(Inst, AA))
       continue;
     if (!FoundSet) {
       // If this is the first alias set ptr can go into, remember it.
-      FoundSet = &*Cur;
+      FoundSet = &AS;
     } else {
       // Otherwise, we must merge the sets.
-      FoundSet->mergeSetIn(*Cur, *this);
+      FoundSet->mergeSetIn(AS, *this);
     }
   }
   return FoundSet;
@@ -599,8 +597,8 @@
   // without worrying about iterator invalidation.
   std::vector<AliasSet *> ASVector;
   ASVector.reserve(SaturationThreshold);
-  for (iterator I = begin(), E = end(); I != E; I++)
-    ASVector.push_back(&*I);
+  for (AliasSet &AS : *this)
+    ASVector.push_back(&AS);
 
   // Copy all instructions and pointers into a new set, and forward all other
   // sets to it.
@@ -746,8 +744,8 @@
       auto &AAWP = getAnalysis<AAResultsWrapperPass>();
       AliasSetTracker Tracker(AAWP.getAAResults());
       errs() << "Alias sets for function '" << F.getName() << "':\n";
-      for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
-        Tracker.add(&*I);
+      for (Instruction &I : instructions(F))
+        Tracker.add(&I);
       Tracker.print(errs());
       return false;
     }
@@ -770,8 +768,8 @@
   auto &AA = AM.getResult<AAManager>(F);
   AliasSetTracker Tracker(AA);
   OS << "Alias sets for function '" << F.getName() << "':\n";
-  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
-    Tracker.add(&*I);
+  for (Instruction &I : instructions(F))
+    Tracker.add(&I);
   Tracker.print(OS);
   return PreservedAnalyses::all();
 }
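
Several loops in AliasSetTracker.cpp now use llvm::make_early_inc_range, which advances the iterator before the loop body runs; that is what makes mergeSetIn safe to call even though it can unlink the current set. A standalone sketch of the idiom, using std::list in place of LLVM's intrusive list of alias sets:

    #include <iostream>
    #include <list>

    int main() {
      std::list<int> Sets = {1, 2, 3, 4};
      for (auto It = Sets.begin(), E = Sets.end(); It != E;) {
        auto Cur = It++;      // early increment, as make_early_inc_range does
        if (*Cur % 2 == 0)
          Sets.erase(Cur);    // safe: It already points past *Cur
      }
      for (int S : Sets)
        std::cout << S << ' '; // prints: 1 3
      std::cout << '\n';
    }
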
diff --git a/src/llvm-project/llvm/lib/Analysis/AssumeBundleQueries.cpp b/src/llvm-project/llvm/lib/Analysis/AssumeBundleQueries.cpp
index 0084e2f..dee0443 100644
--- a/src/llvm-project/llvm/lib/Analysis/AssumeBundleQueries.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/AssumeBundleQueries.cpp
@@ -6,8 +6,6 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "assume-queries"
-
 #include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AssumptionCache.h"
@@ -18,6 +16,8 @@
 #include "llvm/IR/PatternMatch.h"
 #include "llvm/Support/DebugCounter.h"
 
+#define DEBUG_TYPE "assume-queries"
+
 using namespace llvm;
 using namespace llvm::PatternMatch;
 
@@ -33,23 +33,18 @@
   return BOI.End - BOI.Begin > Idx;
 }
 
-static Value *getValueFromBundleOpInfo(CallInst &Assume,
+static Value *getValueFromBundleOpInfo(AssumeInst &Assume,
                                        const CallBase::BundleOpInfo &BOI,
                                        unsigned Idx) {
   assert(bundleHasArgument(BOI, Idx) && "index out of range");
   return (Assume.op_begin() + BOI.Begin + Idx)->get();
 }
 
-bool llvm::hasAttributeInAssume(CallInst &AssumeCI, Value *IsOn,
+bool llvm::hasAttributeInAssume(AssumeInst &Assume, Value *IsOn,
                                 StringRef AttrName, uint64_t *ArgVal) {
-  assert(isa<IntrinsicInst>(AssumeCI) &&
-         "this function is intended to be used on llvm.assume");
-  IntrinsicInst &Assume = cast<IntrinsicInst>(AssumeCI);
-  assert(Assume.getIntrinsicID() == Intrinsic::assume &&
-         "this function is intended to be used on llvm.assume");
   assert(Attribute::isExistingAttribute(AttrName) &&
          "this attribute doesn't exist");
-  assert((ArgVal == nullptr || Attribute::doesAttrKindHaveArgument(
+  assert((ArgVal == nullptr || Attribute::isIntAttrKind(
                                    Attribute::getAttrKindFromName(AttrName))) &&
          "requested value for an attribute that has no argument");
   if (Assume.bundle_op_infos().empty())
@@ -72,10 +67,7 @@
   return false;
 }
 
-void llvm::fillMapFromAssume(CallInst &AssumeCI, RetainedKnowledgeMap &Result) {
-  IntrinsicInst &Assume = cast<IntrinsicInst>(AssumeCI);
-  assert(Assume.getIntrinsicID() == Intrinsic::assume &&
-         "this function is intended to be used on llvm.assume");
+void llvm::fillMapFromAssume(AssumeInst &Assume, RetainedKnowledgeMap &Result) {
   for (auto &Bundles : Assume.bundle_op_infos()) {
     std::pair<Value *, Attribute::AttrKind> Key{
         nullptr, Attribute::getAttrKindFromName(Bundles.Tag->getKey())};
@@ -88,9 +80,11 @@
       Result[Key][&Assume] = {0, 0};
       continue;
     }
-    unsigned Val = cast<ConstantInt>(
-                       getValueFromBundleOpInfo(Assume, Bundles, ABA_Argument))
-                       ->getZExtValue();
+    auto *CI = dyn_cast<ConstantInt>(
+        getValueFromBundleOpInfo(Assume, Bundles, ABA_Argument));
+    if (!CI)
+      continue;
+    unsigned Val = CI->getZExtValue();
     auto Lookup = Result.find(Key);
     if (Lookup == Result.end() || !Lookup->second.count(&Assume)) {
       Result[Key][&Assume] = {Val, Val};
@@ -102,7 +96,7 @@
 }
 
 RetainedKnowledge
-llvm::getKnowledgeFromBundle(CallInst &Assume,
+llvm::getKnowledgeFromBundle(AssumeInst &Assume,
                              const CallBase::BundleOpInfo &BOI) {
   RetainedKnowledge Result;
   Result.AttrKind = Attribute::getAttrKindFromName(BOI.Tag->getKey());
@@ -122,19 +116,13 @@
   return Result;
 }
 
-RetainedKnowledge llvm::getKnowledgeFromOperandInAssume(CallInst &AssumeCI,
+RetainedKnowledge llvm::getKnowledgeFromOperandInAssume(AssumeInst &Assume,
                                                         unsigned Idx) {
-  IntrinsicInst &Assume = cast<IntrinsicInst>(AssumeCI);
-  assert(Assume.getIntrinsicID() == Intrinsic::assume &&
-         "this function is intended to be used on llvm.assume");
   CallBase::BundleOpInfo BOI = Assume.getBundleOpInfoForOperand(Idx);
-  return getKnowledgeFromBundle(AssumeCI, BOI);
+  return getKnowledgeFromBundle(Assume, BOI);
 }
 
-bool llvm::isAssumeWithEmptyBundle(CallInst &CI) {
-  IntrinsicInst &Assume = cast<IntrinsicInst>(CI);
-  assert(Assume.getIntrinsicID() == Intrinsic::assume &&
-         "this function is intended to be used on llvm.assume");
+bool llvm::isAssumeWithEmptyBundle(AssumeInst &Assume) {
   return none_of(Assume.bundle_op_infos(),
                  [](const CallBase::BundleOpInfo &BOI) {
                    return BOI.Tag->getKey() != IgnoreBundleTag;
@@ -142,10 +130,10 @@
 }
 
 static CallInst::BundleOpInfo *getBundleFromUse(const Use *U) {
-  auto *Intr = dyn_cast<IntrinsicInst>(U->getUser());
   if (!match(U->getUser(),
              m_Intrinsic<Intrinsic::assume>(m_Unless(m_Specific(U->get())))))
     return nullptr;
+  auto *Intr = cast<IntrinsicInst>(U->getUser());
   return &Intr->getBundleOpInfoForOperand(U->getOperandNo());
 }
 
@@ -156,10 +144,9 @@
   if (!Bundle)
     return RetainedKnowledge::none();
   RetainedKnowledge RK =
-      getKnowledgeFromBundle(*cast<CallInst>(U->getUser()), *Bundle);
-  for (auto Attr : AttrKinds)
-    if (Attr == RK.AttrKind)
-      return RK;
+      getKnowledgeFromBundle(*cast<AssumeInst>(U->getUser()), *Bundle);
+  if (llvm::is_contained(AttrKinds, RK.AttrKind))
+    return RK;
   return RetainedKnowledge::none();
 }
 
@@ -175,7 +162,7 @@
     return RetainedKnowledge::none();
   if (AC) {
     for (AssumptionCache::ResultElem &Elem : AC->assumptionsFor(V)) {
-      IntrinsicInst *II = cast_or_null<IntrinsicInst>(Elem.Assume);
+      auto *II = cast_or_null<AssumeInst>(Elem.Assume);
       if (!II || Elem.Index == AssumptionCache::ExprResultIdx)
         continue;
       if (RetainedKnowledge RK = getKnowledgeFromBundle(
@@ -196,7 +183,7 @@
     if (!Bundle)
       continue;
     if (RetainedKnowledge RK =
-            getKnowledgeFromBundle(*cast<CallInst>(U.getUser()), *Bundle))
+            getKnowledgeFromBundle(*cast<AssumeInst>(U.getUser()), *Bundle))
       if (is_contained(AttrKinds, RK.AttrKind) &&
           Filter(RK, cast<Instruction>(U.getUser()), Bundle)) {
         NumUsefullAssumeQueries++;
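
The through-line of the AssumeBundleQueries.cpp changes is replacing CallInst parameters plus runtime asserts with the AssumeInst type, moving the "this is llvm.assume" contract into the signature. A toy sketch of that trade, with a stand-in hierarchy rather than LLVM's real classes:

    #include <cassert>
    #include <iostream>

    struct CallInst { virtual ~CallInst() = default; };
    struct AssumeInst : CallInst {};

    // Before: every entry point re-checked the contract at runtime.
    void fillMapFromAssumeBefore(CallInst &CI) {
      assert(dynamic_cast<AssumeInst *>(&CI) && "intended for llvm.assume");
    }

    // After: passing a non-assume call simply does not compile.
    void fillMapFromAssumeAfter(AssumeInst &Assume) { (void)Assume; }

    int main() {
      AssumeInst A;
      fillMapFromAssumeBefore(A);
      fillMapFromAssumeAfter(A);
      std::cout << "ok\n";
    }
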
diff --git a/src/llvm-project/llvm/lib/Analysis/AssumptionCache.cpp b/src/llvm-project/llvm/lib/Analysis/AssumptionCache.cpp
index 70053fd..0d95b33 100644
--- a/src/llvm-project/llvm/lib/Analysis/AssumptionCache.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/AssumptionCache.cpp
@@ -56,7 +56,7 @@
 }
 
 static void
-findAffectedValues(CallInst *CI,
+findAffectedValues(CallBase *CI,
                    SmallVectorImpl<AssumptionCache::ResultElem> &Affected) {
   // Note: This code must be kept in-sync with the code in
   // computeKnownBitsFromAssume in ValueTracking.
@@ -126,7 +126,7 @@
   }
 }
 
-void AssumptionCache::updateAffectedValues(CallInst *CI) {
+void AssumptionCache::updateAffectedValues(AssumeInst *CI) {
   SmallVector<AssumptionCache::ResultElem, 16> Affected;
   findAffectedValues(CI, Affected);
 
@@ -139,7 +139,7 @@
   }
 }
 
-void AssumptionCache::unregisterAssumption(CallInst *CI) {
+void AssumptionCache::unregisterAssumption(AssumeInst *CI) {
   SmallVector<AssumptionCache::ResultElem, 16> Affected;
   findAffectedValues(CI, Affected);
 
@@ -202,22 +202,19 @@
   // Go through all instructions in all blocks, add all calls to @llvm.assume
   // to this cache.
   for (BasicBlock &B : F)
-    for (Instruction &II : B)
-      if (match(&II, m_Intrinsic<Intrinsic::assume>()))
-        AssumeHandles.push_back({&II, ExprResultIdx});
+    for (Instruction &I : B)
+      if (isa<AssumeInst>(&I))
+        AssumeHandles.push_back({&I, ExprResultIdx});
 
   // Mark the scan as complete.
   Scanned = true;
 
   // Update affected values.
   for (auto &A : AssumeHandles)
-    updateAffectedValues(cast<CallInst>(A));
+    updateAffectedValues(cast<AssumeInst>(A));
 }
 
-void AssumptionCache::registerAssumption(CallInst *CI) {
-  assert(match(CI, m_Intrinsic<Intrinsic::assume>()) &&
-         "Registered call does not call @llvm.assume");
-
+void AssumptionCache::registerAssumption(AssumeInst *CI) {
   // If we haven't scanned the function yet, just drop this assumption. It will
   // be found when we scan later.
   if (!Scanned)
diff --git a/src/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 97d0cb6..357772c 100644
--- a/src/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -23,7 +23,6 @@
 #include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/CaptureTracking.h"
 #include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/PhiValues.h"
@@ -104,7 +103,6 @@
   // depend on them.
   if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
       (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
-      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
       (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
     return true;
 
@@ -131,6 +129,14 @@
   if (isa<LoadInst>(V))
     return true;
 
+  // The inttoptr case works because isNonEscapingLocalObject considers all
+  // means of converting or equating a pointer to an int (ptrtoint, ptr store
+  // which could be followed by an integer load, ptr<->int compare) as
+  // escaping, and objects located at well-known addresses via platform-specific
+  // means cannot be considered non-escaping local objects.
+  if (isa<IntToPtrInst>(V))
+    return true;
+
   return false;
 }
 
@@ -201,9 +207,11 @@
   // If we have dereferenceability information we know a lower bound for the
   // extent as accesses for a lower offset would be valid. We need to exclude
   // the "or null" part if null is a valid pointer.
-  bool CanBeNull;
-  uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
+  bool CanBeNull, CanBeFreed;
+  uint64_t DerefBytes =
+    V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
   DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
+  DerefBytes = CanBeFreed ? 0 : DerefBytes;
   // If queried with a precise location size, we assume that location size to be
   // accessed, thus valid.
   if (LocSize.isPrecise())
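
The hunk above threads the new CanBeFreed flag through getPointerDereferenceableBytes: dereferenceable bytes no longer count once the object may be freed, just as "or null" bytes don't count when null is a valid address. A sketch of the resulting lower-bound rule (free-standing function with an illustrative signature, not the LLVM code):

    #include <cstdint>
    #include <iostream>

    uint64_t knownDerefBytes(uint64_t DerefBytes, bool CanBeNull,
                             bool CanBeFreed, bool NullIsValidLoc) {
      if (CanBeNull && NullIsValidLoc)
        return 0; // "or null" bytes don't count when null is addressable
      if (CanBeFreed)
        return 0; // bytes of a possibly-freed object don't count
      return DerefBytes;
    }

    int main() {
      std::cout << knownDerefBytes(16, true, false, true) << '\n';   // 0
      std::cout << knownDerefBytes(16, false, true, false) << '\n';  // 0
      std::cout << knownDerefBytes(16, false, false, false) << '\n'; // 16
    }
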
@@ -222,167 +230,168 @@
 // GetElementPtr Instruction Decomposition and Analysis
 //===----------------------------------------------------------------------===//
 
+namespace {
+/// Represents zext(sext(V)).
+struct ExtendedValue {
+  const Value *V;
+  unsigned ZExtBits;
+  unsigned SExtBits;
+
+  explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
+                         unsigned SExtBits = 0)
+      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}
+
+  unsigned getBitWidth() const {
+    return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
+  }
+
+  ExtendedValue withValue(const Value *NewV) const {
+    return ExtendedValue(NewV, ZExtBits, SExtBits);
+  }
+
+  ExtendedValue withZExtOfValue(const Value *NewV) const {
+    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
+                        NewV->getType()->getPrimitiveSizeInBits();
+    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
+    return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
+  }
+
+  ExtendedValue withSExtOfValue(const Value *NewV) const {
+    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
+                        NewV->getType()->getPrimitiveSizeInBits();
+    // zext(sext(sext(NewV)))
+    return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
+  }
+
+  APInt evaluateWith(APInt N) const {
+    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
+           "Incompatible bit width");
+    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
+    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
+    return N;
+  }
+
+  bool canDistributeOver(bool NUW, bool NSW) const {
+    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
+    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
+    return (!ZExtBits || NUW) && (!SExtBits || NSW);
+  }
+};
+
+/// Represents zext(sext(V)) * Scale + Offset.
+struct LinearExpression {
+  ExtendedValue Val;
+  APInt Scale;
+  APInt Offset;
+
+  /// True if all operations in this expression are NSW.
+  bool IsNSW;
+
+  LinearExpression(const ExtendedValue &Val, const APInt &Scale,
+                   const APInt &Offset, bool IsNSW)
+      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}
+
+  LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) {
+    unsigned BitWidth = Val.getBitWidth();
+    Scale = APInt(BitWidth, 1);
+    Offset = APInt(BitWidth, 0);
+  }
+};
+}
+
 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
 /// B are constant integers.
-///
-/// Returns the scale and offset values as APInts and return V as a Value*, and
-/// return whether we looked through any sign or zero extends.  The incoming
-/// Value is known to have IntegerType, and it may already be sign or zero
-/// extended.
-///
-/// Note that this looks through extends, so the high bits may not be
-/// represented in the result.
-/*static*/ const Value *BasicAAResult::GetLinearExpression(
-    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
-    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
-    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
-  assert(V->getType()->isIntegerTy() && "Not an integer value");
-
+static LinearExpression GetLinearExpression(
+    const ExtendedValue &Val, const DataLayout &DL, unsigned Depth,
+    AssumptionCache *AC, DominatorTree *DT) {
   // Limit our recursion depth.
-  if (Depth == 6) {
-    Scale = 1;
-    Offset = 0;
-    return V;
-  }
+  if (Depth == 6)
+    return Val;
 
-  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
-    // If it's a constant, just convert it to an offset and remove the variable.
-    // If we've been called recursively, the Offset bit width will be greater
-    // than the constant's (the Offset's always as wide as the outermost call),
-    // so we'll zext here and process any extension in the isa<SExtInst> &
-    // isa<ZExtInst> cases below.
-    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
-    assert(Scale == 0 && "Constant values don't have a scale");
-    return V;
-  }
+  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
+    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
+                            Val.evaluateWith(Const->getValue()), true);
 
-  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
+  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
-      // If we've been called recursively, then Offset and Scale will be wider
-      // than the BOp operands. We'll always zext it here as we'll process sign
-      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
-      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());
+      APInt RHS = Val.evaluateWith(RHSC->getValue());
+      // The only non-OBO case we deal with is or, and only limited to the
+      // case where it is both nuw and nsw.
+      bool NUW = true, NSW = true;
+      if (isa<OverflowingBinaryOperator>(BOp)) {
+        NUW &= BOp->hasNoUnsignedWrap();
+        NSW &= BOp->hasNoSignedWrap();
+      }
+      if (!Val.canDistributeOver(NUW, NSW))
+        return Val;
 
+      LinearExpression E(Val);
       switch (BOp->getOpcode()) {
       default:
         // We don't understand this instruction, so we can't decompose it any
         // further.
-        Scale = 1;
-        Offset = 0;
-        return V;
+        return Val;
       case Instruction::Or:
         // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
         // analyze it.
         if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
-                               BOp, DT)) {
-          Scale = 1;
-          Offset = 0;
-          return V;
-        }
-        LLVM_FALLTHROUGH;
-      case Instruction::Add:
-        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
-                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
-        Offset += RHS;
-        break;
-      case Instruction::Sub:
-        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
-                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
-        Offset -= RHS;
-        break;
-      case Instruction::Mul:
-        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
-                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
-        Offset *= RHS;
-        Scale *= RHS;
-        break;
-      case Instruction::Shl:
-        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
-                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+                               BOp, DT))
+          return Val;
 
+        LLVM_FALLTHROUGH;
+      case Instruction::Add: {
+        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
+                                Depth + 1, AC, DT);
+        E.Offset += RHS;
+        E.IsNSW &= NSW;
+        break;
+      }
+      case Instruction::Sub: {
+        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
+                                Depth + 1, AC, DT);
+        E.Offset -= RHS;
+        E.IsNSW &= NSW;
+        break;
+      }
+      case Instruction::Mul: {
+        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
+                                Depth + 1, AC, DT);
+        E.Offset *= RHS;
+        E.Scale *= RHS;
+        E.IsNSW &= NSW;
+        break;
+      }
+      case Instruction::Shl:
         // We're trying to linearize an expression of the kind:
         //   shl i8 -128, 36
         // where the shift count exceeds the bitwidth of the type.
         // We can't decompose this further (the expression would return
         // a poison value).
-        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
-            Scale.getBitWidth() < RHS.getLimitedValue()) {
-          Scale = 1;
-          Offset = 0;
-          return V;
-        }
+        if (RHS.getLimitedValue() > Val.getBitWidth())
+          return Val;
 
-        Offset <<= RHS.getLimitedValue();
-        Scale <<= RHS.getLimitedValue();
-        // the semantics of nsw and nuw for left shifts don't match those of
-        // multiplications, so we won't propagate them.
-        NSW = NUW = false;
-        return V;
+        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
+                                Depth + 1, AC, DT);
+        E.Offset <<= RHS.getLimitedValue();
+        E.Scale <<= RHS.getLimitedValue();
+        E.IsNSW &= NSW;
+        break;
       }
-
-      if (isa<OverflowingBinaryOperator>(BOp)) {
-        NUW &= BOp->hasNoUnsignedWrap();
-        NSW &= BOp->hasNoSignedWrap();
-      }
-      return V;
+      return E;
     }
   }
 
-  // Since GEP indices are sign extended anyway, we don't care about the high
-  // bits of a sign or zero extended value - just scales and offsets.  The
-  // extensions have to be consistent though.
-  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
-    Value *CastOp = cast<CastInst>(V)->getOperand(0);
-    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
-    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
-    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
-    const Value *Result =
-        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
-                            Depth + 1, AC, DT, NSW, NUW);
+  if (isa<ZExtInst>(Val.V))
+    return GetLinearExpression(
+        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
+        DL, Depth + 1, AC, DT);
 
-    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
-    // by just incrementing the number of bits we've extended by.
-    unsigned ExtendedBy = NewWidth - SmallWidth;
+  if (isa<SExtInst>(Val.V))
+    return GetLinearExpression(
+        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
+        DL, Depth + 1, AC, DT);
 
-    if (isa<SExtInst>(V) && ZExtBits == 0) {
-      // sext(sext(%x, a), b) == sext(%x, a + b)
-
-      if (NSW) {
-        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
-        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
-        unsigned OldWidth = Offset.getBitWidth();
-        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
-      } else {
-        // We may have signed-wrapped, so don't decompose sext(%x + c) into
-        // sext(%x) + sext(c)
-        Scale = 1;
-        Offset = 0;
-        Result = CastOp;
-        ZExtBits = OldZExtBits;
-        SExtBits = OldSExtBits;
-      }
-      SExtBits += ExtendedBy;
-    } else {
-      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)
-
-      if (!NUW) {
-        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
-        // zext(%x) + zext(c)
-        Scale = 1;
-        Offset = 0;
-        Result = CastOp;
-        ZExtBits = OldZExtBits;
-        SExtBits = OldSExtBits;
-      }
-      ZExtBits += ExtendedBy;
-    }
-
-    return Result;
-  }
-
-  Scale = 1;
-  Offset = 0;
-  return V;
+  return Val;
 }
 
 /// To ensure a pointer offset fits in an integer of size PointerSize
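
The rewrite above folds the old Scale/Offset/ZExtBits/SExtBits out-parameters of GetLinearExpression into the ExtendedValue and LinearExpression structs. The key invariant is that ExtendedValue models zext(sext(V)), so evaluateWith applies the inner sign extension before the outer zero extension. A fixed-width sketch of that ordering, with 8/16/32-bit integers standing in for APInt resizing:

    #include <cstdint>
    #include <iostream>

    int main() {
      int8_t N = -1;                            // V : i8, value -1
      int16_t AfterSExt = N;                    // sext i8 -1 to i16 -> 0xffff
      uint32_t AfterZExt = (uint16_t)AfterSExt; // zext i16 to i32 -> 0x0000ffff
      std::cout << std::hex << AfterZExt << '\n'; // prints ffff
    }
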
@@ -477,6 +486,13 @@
       return Decomposed;
     }
 
+    // Track whether we've seen at least one in bounds gep, and if so, whether
+    // all geps parsed were in bounds.
+    if (Decomposed.InBounds == None)
+      Decomposed.InBounds = GEPOp->isInBounds();
+    else if (!GEPOp->isInBounds())
+      Decomposed.InBounds = false;
+
     // Don't attempt to analyze GEPs over unsized objects.
     if (!GEPOp->getSourceElementType()->isSized()) {
       Decomposed.Base = V;
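
DecomposedGEP now carries a tri-state InBounds flag (the comparison against None suggests llvm::Optional<bool>): unset until the first GEP is decomposed, and true only while every GEP in the chain is inbounds. A sketch of that merge rule using std::optional:

    #include <iostream>
    #include <optional>

    void trackInBounds(std::optional<bool> &InBounds, bool GEPIsInBounds) {
      if (!InBounds)
        InBounds = GEPIsInBounds; // first GEP seen
      else if (!GEPIsInBounds)
        InBounds = false;         // any non-inbounds GEP poisons the chain
    }

    int main() {
      std::optional<bool> InBounds;
      trackInBounds(InBounds, true);
      trackInBounds(InBounds, false);
      std::cout << *InBounds << '\n'; // prints 0
    }
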
@@ -525,20 +541,12 @@
 
       APInt Scale(MaxPointerSize,
                   DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
-      unsigned ZExtBits = 0, SExtBits = 0;
-
       // If the integer type is smaller than the pointer size, it is implicitly
       // sign extended to pointer size.
       unsigned Width = Index->getType()->getIntegerBitWidth();
-      if (PointerSize > Width)
-        SExtBits += PointerSize - Width;
-
-      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
-      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
-      bool NSW = true, NUW = true;
-      const Value *OrigIndex = Index;
-      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
-                                  SExtBits, DL, 0, AC, DT, NSW, NUW);
+      unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
+      LinearExpression LE = GetLinearExpression(
+          ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);
 
       // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
       // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
@@ -551,19 +559,13 @@
       // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
       // possibility.
       bool Overflow;
-      APInt ScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize)
+      APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
                            .smul_ov(Scale, Overflow);
       if (Overflow) {
-        Index = OrigIndex;
-        IndexScale = 1;
-        IndexOffset = 0;
-
-        ZExtBits = SExtBits = 0;
-        if (PointerSize > Width)
-          SExtBits += PointerSize - Width;
+        LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
       } else {
         Decomposed.Offset += ScaledOffset;
-        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
+        Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
       }
 
       // If we already had an occurrence of this index variable, merge this
@@ -571,9 +573,9 @@
       //   A[x][x] -> x*16 + x*4 -> x*20
       // This also ensures that 'x' only appears in the index list once.
       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
-        if (Decomposed.VarIndices[i].V == Index &&
-            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
-            Decomposed.VarIndices[i].SExtBits == SExtBits) {
+        if (Decomposed.VarIndices[i].V == LE.Val.V &&
+            Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
+            Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
           Scale += Decomposed.VarIndices[i].Scale;
           Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
           break;
@@ -585,7 +587,8 @@
       Scale = adjustToPointerSize(Scale, PointerSize);
 
       if (!!Scale) {
-        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale, CxtI};
+        VariableGEPIndex Entry = {
+            LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits, Scale, CxtI, LE.IsNSW};
         Decomposed.VarIndices.push_back(Entry);
       }
     }
@@ -665,6 +668,11 @@
   return Worklist.empty();
 }
 
+static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
+  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
+  return II && II->getIntrinsicID() == IID;
+}
+
 /// Returns the behavior when calling the given call site.
 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
   if (Call->doesNotAccessMemory())
@@ -764,11 +772,6 @@
   return AAResultBase::getArgModRefInfo(Call, ArgIdx);
 }
 
-static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
-  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
-  return II && II->getIntrinsicID() == IID;
-}
-
 #ifndef NDEBUG
 static const Function *getParent(const Value *V) {
   if (const Instruction *inst = dyn_cast<Instruction>(V)) {
@@ -797,8 +800,7 @@
                                  AAQueryInfo &AAQI) {
   assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
          "BasicAliasAnalysis doesn't support interprocedural queries.");
-  return aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr, LocB.Size,
-                    LocB.AATags, AAQI);
+  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
 }
 
 /// Checks to see if the specified callsite can clobber the specified memory
@@ -865,10 +867,10 @@
       AliasResult AR = getBestAAResults().alias(
           MemoryLocation::getBeforeOrAfter(*CI),
           MemoryLocation::getBeforeOrAfter(Object), AAQI);
-      if (AR != MustAlias)
+      if (AR != AliasResult::MustAlias)
         IsMustAlias = false;
       // Operand doesn't alias 'Object', continue looking for other aliases
-      if (AR == NoAlias)
+      if (AR == AliasResult::NoAlias)
         continue;
       // Operand aliases 'Object', but call doesn't modify it. Strengthen
       // initial assumption and keep looking in case if there are more aliases.
@@ -910,8 +912,8 @@
   if (isMallocOrCallocLikeFn(Call, &TLI)) {
     // Be conservative if the accessed pointer may alias the allocation -
     // fallback to the generic handling below.
-    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call),
-                                 Loc, AAQI) == NoAlias)
+    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
+                                 AAQI) == AliasResult::NoAlias)
       return ModRefInfo::NoModRef;
   }
 
@@ -925,22 +927,16 @@
         getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
     // It's also possible for Loc to alias both src and dest, or neither.
     ModRefInfo rv = ModRefInfo::NoModRef;
-    if (SrcAA != NoAlias)
+    if (SrcAA != AliasResult::NoAlias)
       rv = setRef(rv);
-    if (DestAA != NoAlias)
+    if (DestAA != AliasResult::NoAlias)
       rv = setMod(rv);
     return rv;
   }
 
-  // While the assume intrinsic is marked as arbitrarily writing so that
-  // proper control dependencies will be maintained, it never aliases any
-  // particular memory location.
-  if (isIntrinsicCall(Call, Intrinsic::assume))
-    return ModRefInfo::NoModRef;
-
-  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
-  // that proper control dependencies are maintained but they never mods any
-  // particular memory location.
+  // Guard intrinsics are marked as arbitrarily writing so that proper control
+  // dependencies are maintained but they never mod any particular memory
+  // location.
   //
   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
   // heap state at the point the guard is issued needs to be consistent in case
@@ -984,16 +980,9 @@
 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                         const CallBase *Call2,
                                         AAQueryInfo &AAQI) {
-  // While the assume intrinsic is marked as arbitrarily writing so that
-  // proper control dependencies will be maintained, it never aliases any
-  // particular memory location.
-  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
-      isIntrinsicCall(Call2, Intrinsic::assume))
-    return ModRefInfo::NoModRef;
-
-  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
-  // that proper control dependencies are maintained but they never mod any
-  // particular memory location.
+  // Guard intrinsics are marked as arbitrarily writing so that proper control
+  // dependencies are maintained but they never mod any particular memory
+  // location.
   //
   // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
   // heap state at the point the guard is issued needs to be consistent in case
@@ -1016,62 +1005,17 @@
   return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
 }
 
-// If a we have (a) a GEP and (b) a pointer based on an alloca, and the
-// beginning of the object the GEP points would have a negative offset with
-// repsect to the alloca, that means the GEP can not alias pointer (b).
-// Note that the pointer based on the alloca may not be a GEP. For
-// example, it may be the alloca itself.
-// The same applies if (b) is based on a GlobalVariable. Note that just being
-// based on isIdentifiedObject() is not enough - we need an identified object
-// that does not permit access to negative offsets. For example, a negative
-// offset from a noalias argument or call can be inbounds w.r.t the actual
-// underlying object.
-//
-// For example, consider:
-//
-//   struct { int f0, int f1, ...} foo;
-//   foo alloca;
-//   foo* random = bar(alloca);
-//   int *f0 = &alloca.f0
-//   int *f1 = &random->f1;
-//
-// Which is lowered, approximately, to:
-//
-//  %alloca = alloca %struct.foo
-//  %random = call %struct.foo* @random(%struct.foo* %alloca)
-//  %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
-//  %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
-//
-// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
-// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
-// point into the same object. But since %f0 points to the beginning of %alloca,
-// the highest %f1 can be is (%alloca + 3). This means %random can not be higher
-// than (%alloca - 1), and so is not inbounds, a contradiction.
-bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
-      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
-      LocationSize MaybeObjectAccessSize) {
-  // If the object access size is unknown, or the GEP isn't inbounds, bail.
-  if (!MaybeObjectAccessSize.hasValue() || !GEPOp->isInBounds())
-    return false;
-
-  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();
-
-  // We need the object to be an alloca or a globalvariable, and want to know
-  // the offset of the pointer from the object precisely, so no variable
-  // indices are allowed.
-  if (!(isa<AllocaInst>(DecompObject.Base) ||
-        isa<GlobalVariable>(DecompObject.Base)) ||
-      !DecompObject.VarIndices.empty())
-    return false;
-
-  // If the GEP has no variable indices, we know the precise offset
-  // from the base, then use it. If the GEP has variable indices,
-  // we can't get exact GEP offset to identify pointer alias. So return
-  // false in that case.
-  if (!DecompGEP.VarIndices.empty())
-    return false;
-
-  return DecompGEP.Offset.sge(DecompObject.Offset + (int64_t)ObjectAccessSize);
+/// Return true if we know V to be the base address of the corresponding memory
+/// object.  This implies that any address less than V must be out of bounds
+/// for the underlying object.  Note that just being isIdentifiedObject() is
+/// not enough - For example, a negative offset from a noalias argument or call
+/// can be inbounds w.r.t the actual underlying object.
+static bool isBaseOfObject(const Value *V) {
+  // TODO: We can handle other cases here
+  // 1) For GC languages, arguments to functions are often required to be
+  //    base pointers.
+  // 2) Result of allocation routines are often base pointers.  Leverage TLI.
+  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
 }
 
 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
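
isBaseOfObject underpins the NoAlias shortcut that replaces isGEPBaseAtNegativeOffset in a later hunk: once the two GEP decompositions are subtracted, an inbounds GEP with no variable indices whose constant offset relative to the other pointer is at least that pointer's access size cannot overlap it, provided the other pointer is the very start of its object (otherwise the GEP would have to begin out of bounds). A rough standalone restatement of the test, with plain types standing in for APInt and LocationSize and an illustrative function name:

    #include <cstdint>
    #include <iostream>
    #include <optional>

    bool noAliasViaInbounds(bool InBounds, bool HasVarIndices, int64_t Off,
                            std::optional<uint64_t> V2Size,
                            bool BaseIsObjectStart) {
      return InBounds && !HasVarIndices && V2Size &&
             Off >= (int64_t)*V2Size && BaseIsObjectStart;
    }

    int main() {
      // gep inbounds %alloca, 8 vs. a 4-byte access at %alloca -> NoAlias.
      std::cout << noAliasViaInbounds(true, false, 8, 4u, true) << '\n'; // 1
    }
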
@@ -1081,9 +1025,24 @@
 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
 /// V2.
 AliasResult BasicAAResult::aliasGEP(
-    const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
-    const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
+    const GEPOperator *GEP1, LocationSize V1Size,
+    const Value *V2, LocationSize V2Size,
     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
+  if (!V1Size.hasValue() && !V2Size.hasValue()) {
+    // TODO: This limitation exists for compile-time reasons. Relax it if we
+    // can avoid exponential pathological cases.
+    if (!isa<GEPOperator>(V2))
+      return AliasResult::MayAlias;
+
+    // If both accesses have unknown size, we can only check whether the base
+    // objects don't alias.
+    AliasResult BaseAlias = getBestAAResults().alias(
+        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
+        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
+    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
+                                             : AliasResult::MayAlias;
+  }
+
   DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
   DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
 
@@ -1091,108 +1050,95 @@
   // compile-time constant.
   if (!DecompGEP1.HasCompileTimeConstantScale ||
       !DecompGEP2.HasCompileTimeConstantScale)
-    return MayAlias;
+    return AliasResult::MayAlias;
 
   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
          "DecomposeGEPExpression returned a result different from "
          "getUnderlyingObject");
 
-  // If the GEP's offset relative to its base is such that the base would
-  // fall below the start of the object underlying V2, then the GEP and V2
-  // cannot alias.
-  if (isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
-    return NoAlias;
-  // If we have two gep instructions with must-alias or not-alias'ing base
-  // pointers, figure out if the indexes to the GEP tell us anything about the
-  // derived pointer.
-  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
-    // Check for the GEP base being at a negative offset, this time in the other
-    // direction.
-    if (isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
-      return NoAlias;
-    // Do the base pointers alias?
-    AliasResult BaseAlias = getBestAAResults().alias(
-        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
-        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
+  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
+  // symbolic difference.
+  DecompGEP1.Offset -= DecompGEP2.Offset;
+  GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
 
-    // For GEPs with identical offsets, we can preserve the size and AAInfo
-    // when performing the alias check on the underlying objects.
-    if (BaseAlias == MayAlias && DecompGEP1.Offset == DecompGEP2.Offset &&
-        DecompGEP1.VarIndices == DecompGEP2.VarIndices) {
-      AliasResult PreciseBaseAlias = getBestAAResults().alias(
-          MemoryLocation(UnderlyingV1, V1Size, V1AAInfo),
-          MemoryLocation(UnderlyingV2, V2Size, V2AAInfo), AAQI);
-      if (PreciseBaseAlias == NoAlias)
-        return NoAlias;
-    }
+  // If an inbounds GEP would have to start from an out of bounds address
+  // for the two to alias, then we can assume noalias.
+  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
+      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
+      isBaseOfObject(DecompGEP2.Base))
+    return AliasResult::NoAlias;
 
-    // If we get a No or May, then return it immediately, no amount of analysis
-    // will improve this situation.
-    if (BaseAlias != MustAlias) {
-      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
-      return BaseAlias;
-    }
-
-    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
-    // symbolic difference.
-    DecompGEP1.Offset -= DecompGEP2.Offset;
-    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
-
-  } else {
-    // Check to see if these two pointers are related by the getelementptr
-    // instruction.  If one pointer is a GEP with a non-zero index of the other
-    // pointer, we know they cannot alias.
-
-    // If both accesses are unknown size, we can't do anything useful here.
-    if (!V1Size.hasValue() && !V2Size.hasValue())
-      return MayAlias;
-
-    AliasResult R = getBestAAResults().alias(
-        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
-        MemoryLocation(V2, V2Size, V2AAInfo), AAQI);
-    if (R != MustAlias) {
-      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
-      // If V2 is known not to alias GEP base pointer, then the two values
-      // cannot alias per GEP semantics: "Any memory access must be done through
-      // a pointer value associated with an address range of the memory access,
-      // otherwise the behavior is undefined.".
-      assert(R == NoAlias || R == MayAlias);
-      return R;
-    }
+  if (isa<GEPOperator>(V2)) {
+    // Symmetric case to above.
+    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
+        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
+        isBaseOfObject(DecompGEP1.Base))
+      return AliasResult::NoAlias;
   }
 
-  // In the two GEP Case, if there is no difference in the offsets of the
-  // computed pointers, the resultant pointers are a must alias.  This
-  // happens when we have two lexically identical GEP's (for example).
-  //
-  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
-  // must aliases the GEP, the end result is a must alias also.
+  // For GEPs with identical offsets, we can preserve the size and AAInfo
+  // when performing the alias check on the underlying objects.
   if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
-    return MustAlias;
+    return getBestAAResults().alias(
+        MemoryLocation(UnderlyingV1, V1Size),
+        MemoryLocation(UnderlyingV2, V2Size), AAQI);
+
+  // Do the base pointers alias?
+  AliasResult BaseAlias = getBestAAResults().alias(
+      MemoryLocation::getBeforeOrAfter(UnderlyingV1),
+      MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
+
+  // If we get a No or May, then return it immediately, no amount of analysis
+  // will improve this situation.
+  if (BaseAlias != AliasResult::MustAlias) {
+    assert(BaseAlias == AliasResult::NoAlias ||
+           BaseAlias == AliasResult::MayAlias);
+    return BaseAlias;
+  }
 
   // If there is a constant difference between the pointers, but the difference
   // is less than the size of the associated memory object, then we know
   // that the objects are partially overlapping.  If the difference is
   // greater, we know they do not overlap.
   if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
-    if (DecompGEP1.Offset.sge(0)) {
-      if (V2Size.hasValue()) {
-        if (DecompGEP1.Offset.ult(V2Size.getValue()))
-          return PartialAlias;
-        return NoAlias;
-      }
-    } else {
-      // We have the situation where:
+    APInt &Off = DecompGEP1.Offset;
+
+    // Initialize for Off >= 0 (V2 <= GEP1) case.
+    const Value *LeftPtr = V2;
+    const Value *RightPtr = GEP1;
+    LocationSize VLeftSize = V2Size;
+    LocationSize VRightSize = V1Size;
+    const bool Swapped = Off.isNegative();
+
+    if (Swapped) {
+      // Swap if we have the situation where:
       // +                +
       // | BaseOffset     |
       // ---------------->|
       // |-->V1Size       |-------> V2Size
       // GEP1             V2
-      if (V1Size.hasValue()) {
-        if ((-DecompGEP1.Offset).ult(V1Size.getValue()))
-          return PartialAlias;
-        return NoAlias;
+      std::swap(LeftPtr, RightPtr);
+      std::swap(VLeftSize, VRightSize);
+      Off = -Off;
+    }
+
+    if (VLeftSize.hasValue()) {
+      const uint64_t LSize = VLeftSize.getValue();
+      if (Off.ult(LSize)) {
+        // Conservatively drop processing if a phi was visited and/or offset is
+        // too big.
+        AliasResult AR = AliasResult::PartialAlias;
+        if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
+            (Off + VRightSize.getValue()).ule(LSize)) {
+          // Memory referenced by the right pointer is nested. Save the offset
+          // in the cache. Note that the offset was originally estimated as
+          // GEP1-V2, but AliasResult contains the shift that represents
+          // GEP1+Offset=V2.
+          AR.setOffset(-Off.getSExtValue());
+          AR.swap(Swapped);
+        }
+        return AR;
       }
+      return AliasResult::NoAlias;
     }
   }
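
For illustration, the constant-offset branch above reduces to plain interval
arithmetic once the offset sign is normalized. The following standalone sketch
(plain integers in place of APInt/LocationSize; all names invented) mirrors
the decision: disjoint when the offset reaches the left access's size, nested
when offset plus the right size fits inside it, and partial otherwise. The
patch reports both of the last two cases as PartialAlias, recording the offset
only in the nested case.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Sketch: classify two accesses a constant distance `off` apart
    // (right start minus left start, after normalization), sizes in bytes.
    enum class Overlap { None, Partial, Nested };

    static Overlap classify(int64_t off, uint64_t leftSize, uint64_t rightSize) {
      if (off < 0) { // normalize so the "left" access starts first
        std::swap(leftSize, rightSize);
        off = -off;
      }
      uint64_t uoff = static_cast<uint64_t>(off);
      if (uoff >= leftSize)
        return Overlap::None;    // right access starts past the left one
      if (uoff + rightSize <= leftSize)
        return Overlap::Nested;  // right access sits inside the left one
      return Overlap::Partial;   // overlap, but neither contains the other
    }

    int main() {
      // [0,8) vs [4,12): partial; [0,8) vs [2,4): nested; [0,8) vs [8,12): none.
      printf("%d %d %d\n", (int)classify(4, 8, 8), (int)classify(2, 8, 2),
             (int)classify(8, 8, 4));
    }
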
 
@@ -1201,11 +1147,16 @@
     bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
     bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
-      const APInt &Scale = DecompGEP1.VarIndices[i].Scale;
+      APInt Scale = DecompGEP1.VarIndices[i].Scale;
+      APInt ScaleForGCD = DecompGEP1.VarIndices[i].Scale;
+      if (!DecompGEP1.VarIndices[i].IsNSW)
+        ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
+                                          Scale.countTrailingZeros());
+
       if (i == 0)
-        GCD = Scale.abs();
+        GCD = ScaleForGCD.abs();
       else
-        GCD = APIntOps::GreatestCommonDivisor(GCD, Scale.abs());
+        GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
 
       if (AllNonNegative || AllNonPositive) {
         // If the Value could change between cycles, then any reasoning about
@@ -1243,7 +1194,7 @@
     if (V1Size.hasValue() && V2Size.hasValue() &&
         ModOffset.uge(V2Size.getValue()) &&
         (GCD - ModOffset).uge(V1Size.getValue()))
-      return NoAlias;
+      return AliasResult::NoAlias;
 
     // If we know all the variables are non-negative, then the total offset is
     // also non-negative and >= DecompGEP1.Offset. We have the following layout:
@@ -1251,14 +1202,14 @@
     // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
     if (AllNonNegative && V2Size.hasValue() &&
         DecompGEP1.Offset.uge(V2Size.getValue()))
-      return NoAlias;
+      return AliasResult::NoAlias;
     // Similarly, if the variables are non-positive, then the total offset is
     // also non-positive and <= DecompGEP1.Offset. We have the following layout:
     // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
     // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
     if (AllNonPositive && V1Size.hasValue() &&
         (-DecompGEP1.Offset).uge(V1Size.getValue()))
-      return NoAlias;
+      return AliasResult::NoAlias;
 
     if (V1Size.hasValue() && V2Size.hasValue()) {
       // Try to determine whether abs(VarIndex) > 0.
@@ -1289,19 +1240,19 @@
         // or higher both do not alias.
         if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
             OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
-          return NoAlias;
+          return AliasResult::NoAlias;
       }
     }
 
     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                 DecompGEP1.Offset, &AC, DT))
-      return NoAlias;
+      return AliasResult::NoAlias;
   }
 
   // Statically, we can see that the base objects are the same, but the
   // pointers have dynamic offsets which we can't resolve. And none of our
   // little tricks above worked.
-  return MayAlias;
+  return AliasResult::MayAlias;
 }
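
The GCD hunks above reason about the variable part of the offset modulo the
greatest common divisor of the index scales: every address the GEP can produce
is congruent to ModOffset (mod GCD), so the two accesses cannot touch when the
residue intervals [ModOffset, ModOffset+V1Size) and [0, V2Size) are disjoint
within one stride. When an index multiplication is not nsw, only the
power-of-two part of the scale (one bit set at countTrailingZeros) feeds the
GCD, since congruence modulo a power of two survives wrapping. A minimal
sketch of the disjointness test, with invented names:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the modular disjointness test above. GEP1's accesses start at
    // residue `modOff` (mod `gcd`) relative to V2's accesses at residue 0.
    static bool provablyNoAlias(uint64_t gcd, uint64_t modOff, uint64_t v1Size,
                                uint64_t v2Size) {
      // Disjoint if V2 ends at or before modOff and GEP1 ends within the stride.
      return modOff >= v2Size && gcd - modOff >= v1Size;
    }

    int main() {
      // i32 loads of x[2*i] vs x[2*i+1] on an i32 array: stride 8, residues
      // {0..3} vs {4..7} never overlap, so the query is NoAlias.
      printf("%d\n", provablyNoAlias(/*gcd=*/8, /*modOff=*/4,
                                     /*v1Size=*/4, /*v2Size=*/4)); // prints 1
    }
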
 
 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
@@ -1309,56 +1260,55 @@
   if (A == B)
     return A;
   // A mix of PartialAlias and MustAlias is PartialAlias.
-  if ((A == PartialAlias && B == MustAlias) ||
-      (B == PartialAlias && A == MustAlias))
-    return PartialAlias;
+  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
+      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
+    return AliasResult::PartialAlias;
   // Otherwise, we don't know anything.
-  return MayAlias;
+  return AliasResult::MayAlias;
 }
 
 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
 /// against another.
 AliasResult
 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
-                           const AAMDNodes &SIAAInfo, const Value *V2,
-                           LocationSize V2Size, const AAMDNodes &V2AAInfo,
+                           const Value *V2, LocationSize V2Size,
                            AAQueryInfo &AAQI) {
   // If the values are Selects with the same condition, we can do a more precise
   // check: just check for aliases between the values on corresponding arms.
   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
     if (SI->getCondition() == SI2->getCondition()) {
       AliasResult Alias = getBestAAResults().alias(
-          MemoryLocation(SI->getTrueValue(), SISize, SIAAInfo),
-          MemoryLocation(SI2->getTrueValue(), V2Size, V2AAInfo), AAQI);
-      if (Alias == MayAlias)
-        return MayAlias;
+          MemoryLocation(SI->getTrueValue(), SISize),
+          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
+      if (Alias == AliasResult::MayAlias)
+        return AliasResult::MayAlias;
       AliasResult ThisAlias = getBestAAResults().alias(
-          MemoryLocation(SI->getFalseValue(), SISize, SIAAInfo),
-          MemoryLocation(SI2->getFalseValue(), V2Size, V2AAInfo), AAQI);
+          MemoryLocation(SI->getFalseValue(), SISize),
+          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
       return MergeAliasResults(ThisAlias, Alias);
     }
 
   // If both arms of the Select node NoAlias or MustAlias V2, then we return
   // NoAlias / MustAlias. Otherwise, we return MayAlias.
   AliasResult Alias = getBestAAResults().alias(
-      MemoryLocation(V2, V2Size, V2AAInfo),
-      MemoryLocation(SI->getTrueValue(), SISize, SIAAInfo), AAQI);
-  if (Alias == MayAlias)
-    return MayAlias;
+      MemoryLocation(V2, V2Size),
+      MemoryLocation(SI->getTrueValue(), SISize), AAQI);
+  if (Alias == AliasResult::MayAlias)
+    return AliasResult::MayAlias;
 
   AliasResult ThisAlias = getBestAAResults().alias(
-      MemoryLocation(V2, V2Size, V2AAInfo),
-      MemoryLocation(SI->getFalseValue(), SISize, SIAAInfo), AAQI);
+      MemoryLocation(V2, V2Size),
+      MemoryLocation(SI->getFalseValue(), SISize), AAQI);
   return MergeAliasResults(ThisAlias, Alias);
 }
 
 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
 /// another.
 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
-                                    const AAMDNodes &PNAAInfo, const Value *V2,
-                                    LocationSize V2Size,
-                                    const AAMDNodes &V2AAInfo,
+                                    const Value *V2, LocationSize V2Size,
                                     AAQueryInfo &AAQI) {
+  if (!PN->getNumIncomingValues())
+    return AliasResult::NoAlias;
   // If the values are PHIs in the same block, we can do a more precise
   // as well as efficient check: just check for aliases between the values
   // on corresponding edges.
@@ -1367,16 +1317,15 @@
       Optional<AliasResult> Alias;
       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
         AliasResult ThisAlias = getBestAAResults().alias(
-            MemoryLocation(PN->getIncomingValue(i), PNSize, PNAAInfo),
+            MemoryLocation(PN->getIncomingValue(i), PNSize),
             MemoryLocation(
-                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size,
-                V2AAInfo),
+                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
             AAQI);
         if (Alias)
           *Alias = MergeAliasResults(*Alias, ThisAlias);
         else
           Alias = ThisAlias;
-        if (*Alias == MayAlias)
+        if (*Alias == AliasResult::MayAlias)
           break;
       }
       return *Alias;
@@ -1405,7 +1354,7 @@
     // is if both sides are PHI nodes. In which case, this is O(m x n) time
     // where 'm' and 'n' are the number of PHI sources.
     if (PhiValueSet.size() > MaxLookupSearchDepth)
-      return MayAlias;
+      return AliasResult::MayAlias;
     // Add the values to V1Srcs
     for (Value *PV1 : PhiValueSet) {
       if (CheckForRecPhi(PV1))
@@ -1416,13 +1365,19 @@
     // If we don't have PhiInfo then just look at the operands of the phi itself
     // FIXME: Remove this once we can guarantee that we have PhiInfo always
     SmallPtrSet<Value *, 4> UniqueSrc;
+    Value *OnePhi = nullptr;
     for (Value *PV1 : PN->incoming_values()) {
-      if (isa<PHINode>(PV1))
-        // If any of the source itself is a PHI, return MayAlias conservatively
-        // to avoid compile time explosion. The worst possible case is if both
-        // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
-        // and 'n' are the number of PHI sources.
-        return MayAlias;
+      if (isa<PHINode>(PV1)) {
+        if (OnePhi && OnePhi != PV1) {
+          // To control potential compile time explosion, we choose to be
+          // conservative when we have more than one Phi input.  It is important
+          // that we handle the single phi case as that lets us handle LCSSA
+          // phi nodes and (combined with the recursive phi handling) simple
+          // pointer induction variable patterns.
+          return AliasResult::MayAlias;
+        }
+        OnePhi = PV1;
+      }
 
       if (CheckForRecPhi(PV1))
         continue;
@@ -1430,13 +1385,18 @@
       if (UniqueSrc.insert(PV1).second)
         V1Srcs.push_back(PV1);
     }
+
+    if (OnePhi && UniqueSrc.size() > 1)
+      // Out of an abundance of caution, allow only the trivial LCSSA and
+      // recursive phi cases.
+      return AliasResult::MayAlias;
   }
 
   // If V1Srcs is empty then that means that the phi has no underlying non-phi
   // value. This should only be possible in blocks unreachable from the entry
   // block, but return MayAlias just in case.
   if (V1Srcs.empty())
-    return MayAlias;
+    return AliasResult::MayAlias;
 
   // If this PHI node is recursive, indicate that the pointer may be moved
   // across iterations. We can only prove NoAlias if different underlying
@@ -1456,21 +1416,21 @@
   // If we inserted a block into VisitedPhiBBs, alias analysis results that
   // have been cached earlier may no longer be valid. Perform recursive queries
   // with a new AAQueryInfo.
-  AAQueryInfo NewAAQI;
+  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
 
   AliasResult Alias = getBestAAResults().alias(
-      MemoryLocation(V2, V2Size, V2AAInfo),
-      MemoryLocation(V1Srcs[0], PNSize, PNAAInfo), *UseAAQI);
+      MemoryLocation(V2, V2Size),
+      MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
 
   // Early exit if the check of the first PHI source against V2 is MayAlias.
   // Other results are not possible.
-  if (Alias == MayAlias)
-    return MayAlias;
+  if (Alias == AliasResult::MayAlias)
+    return AliasResult::MayAlias;
   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
   // remain valid for all elements, so we conservatively return MayAlias.
-  if (isRecursive && Alias != NoAlias)
-    return MayAlias;
+  if (isRecursive && Alias != AliasResult::NoAlias)
+    return AliasResult::MayAlias;
 
   // If all sources of the PHI node NoAlias or MustAlias V2, then we return
   // NoAlias / MustAlias. Otherwise, we return MayAlias.
@@ -1478,10 +1438,9 @@
     Value *V = V1Srcs[i];
 
     AliasResult ThisAlias = getBestAAResults().alias(
-        MemoryLocation(V2, V2Size, V2AAInfo),
-        MemoryLocation(V, PNSize, PNAAInfo), *UseAAQI);
+        MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
     Alias = MergeAliasResults(ThisAlias, Alias);
-    if (Alias == MayAlias)
+    if (Alias == AliasResult::MayAlias)
       break;
   }
 
@@ -1491,23 +1450,21 @@
 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
 /// array references.
 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
-                                      const AAMDNodes &V1AAInfo,
                                       const Value *V2, LocationSize V2Size,
-                                      const AAMDNodes &V2AAInfo,
                                       AAQueryInfo &AAQI) {
   // If either of the memory references is empty, it doesn't matter what the
   // pointer values are.
   if (V1Size.isZero() || V2Size.isZero())
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   // Strip off any casts if they exist.
-  V1 = V1->stripPointerCastsAndInvariantGroups();
-  V2 = V2->stripPointerCastsAndInvariantGroups();
+  V1 = V1->stripPointerCastsForAliasAnalysis();
+  V2 = V2->stripPointerCastsForAliasAnalysis();
 
   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
   // value for undef that aliases nothing in the program.
   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   // Are we checking for alias of the same value?
   // Because we look 'through' phi nodes, we could look at "Value" pointers from
@@ -1516,10 +1473,10 @@
   // happen by looking at the visited phi nodes and making sure they cannot
   // reach the value.
   if (isValueEqualInPotentialCycles(V1, V2))
-    return MustAlias;
+    return AliasResult::MustAlias;
 
   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
-    return NoAlias; // Scalars cannot alias each other
+    return AliasResult::NoAlias; // Scalars cannot alias each other
 
   // Figure out what objects these things are pointing to if we can.
   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
@@ -1529,26 +1486,26 @@
   // don't alias any other pointer.
   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
-      return NoAlias;
+      return AliasResult::NoAlias;
   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
-      return NoAlias;
+      return AliasResult::NoAlias;
 
   if (O1 != O2) {
     // If V1/V2 point to two different objects, we know that we have no alias.
     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
-      return NoAlias;
+      return AliasResult::NoAlias;
 
     // Constant pointers can't alias with non-const isIdentifiedObject objects.
     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
-      return NoAlias;
+      return AliasResult::NoAlias;
 
     // Function arguments can't alias with things that are known to be
     // unambiguously identified at the function level.
     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
-      return NoAlias;
+      return AliasResult::NoAlias;
 
     // If one pointer is the result of a call/invoke or load and the other is a
     // non-escaping local object within the same function, then we know the
@@ -1561,10 +1518,10 @@
     // nocapture value to other functions as long as they don't capture it.
     if (isEscapeSource(O1) &&
         isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
-      return NoAlias;
+      return AliasResult::NoAlias;
     if (isEscapeSource(O2) &&
         isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
-      return NoAlias;
+      return AliasResult::NoAlias;
   }
 
   // If the size of one access is larger than the entire object on the other
@@ -1576,7 +1533,7 @@
       (isObjectSmallerThan(
           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
           TLI, NullIsValidLocation)))
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   // If one of the accesses may be before the accessed pointer, canonicalize this
   // by using unknown after-pointer sizes for both accesses. This is
@@ -1590,14 +1547,21 @@
     V2Size = LocationSize::afterPointer();
   }
 
+  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
+  // for recursive queries. For this reason, this limit is chosen to be large
+  // enough to be very rarely hit, while still being small enough to avoid
+  // stack overflows.
+  if (AAQI.Depth >= 512)
+    return AliasResult::MayAlias;
+
   // Check the cache before climbing up use-def chains. This also terminates
   // otherwise infinitely recursive queries.
-  AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
-                            MemoryLocation(V2, V2Size, V2AAInfo));
-  if (V1 > V2)
+  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
+  const bool Swapped = V1 > V2;
+  if (Swapped)
     std::swap(Locs.first, Locs.second);
   const auto &Pair = AAQI.AliasCache.try_emplace(
-      Locs, AAQueryInfo::CacheEntry{NoAlias, 0});
+      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
   if (!Pair.second) {
     auto &Entry = Pair.first->second;
     if (!Entry.isDefinitive()) {
@@ -1605,26 +1569,32 @@
       ++Entry.NumAssumptionUses;
       ++AAQI.NumAssumptionUses;
     }
-    return Entry.Result;
+    // The cache contains sorted {V1,V2} pairs, but we should return the
+    // original order.
+    auto Result = Entry.Result;
+    Result.swap(Swapped);
+    return Result;
   }
 
   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
-  AliasResult Result = aliasCheckRecursive(V1, V1Size, V1AAInfo, V2, V2Size,
-                                           V2AAInfo, AAQI, O1, O2);
+  AliasResult Result =
+      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
 
   auto It = AAQI.AliasCache.find(Locs);
   assert(It != AAQI.AliasCache.end() && "Must be in cache");
   auto &Entry = It->second;
 
   // Check whether a NoAlias assumption has been used, but disproven.
-  bool AssumptionDisproven = Entry.NumAssumptionUses > 0 && Result != NoAlias;
+  bool AssumptionDisproven =
+      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
   if (AssumptionDisproven)
-    Result = MayAlias;
+    Result = AliasResult::MayAlias;
 
   // This is a definitive result now, when considered as a root query.
   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
   Entry.Result = Result;
+  // Cache contains sorted {V1,V2} pairs.
+  Entry.Result.swap(Swapped);
   Entry.NumAssumptionUses = -1;
 
   // If the assumption has been disproven, remove any results that may have
@@ -1636,48 +1606,43 @@
 
   // The result may still be based on assumptions higher up in the chain.
   // Remember it, so it can be purged from the cache later.
-  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses && Result != MayAlias)
+  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
+      Result != AliasResult::MayAlias)
     AAQI.AssumptionBasedResults.push_back(Locs);
   return Result;
 }
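
aliasCheck now keys its cache on the pointer pair in a canonical order and
flips direction-sensitive results (the PartialAlias offset) on the way in and
out via Result.swap(Swapped). A generic sketch of that caching pattern,
independent of the AA types (all names invented):

    #include <cstdio>
    #include <map>
    #include <utility>

    // Cache a directional result under an order-canonicalized key, flipping
    // the direction on insert/lookup whenever the key had to be swapped.
    struct DirectionalResult {
      int offset = 0;                   // e.g. "B starts `offset` bytes after A"
      void flip() { offset = -offset; } // same fact, opposite query order
    };

    using Key = std::pair<const void *, const void *>;

    struct Cache {
      std::map<Key, DirectionalResult> Entries;

      void insert(const void *A, const void *B, DirectionalResult R) {
        if (A > B) { std::swap(A, B); R.flip(); } // store canonically
        Entries[{A, B}] = R;
      }

      bool lookup(const void *A, const void *B, DirectionalResult &Out) {
        bool Swapped = A > B;
        if (Swapped) std::swap(A, B);
        auto It = Entries.find({A, B});
        if (It == Entries.end()) return false;
        Out = It->second;
        if (Swapped) Out.flip(); // report in the caller's operand order
        return true;
      }
    };

    int main() {
      int x, y;
      Cache C;
      C.insert(&x, &y, {8});      // "y starts 8 bytes after x"
      DirectionalResult R;
      if (C.lookup(&y, &x, R))
        printf("%d\n", R.offset); // prints -8 regardless of address order
    }
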
 
 AliasResult BasicAAResult::aliasCheckRecursive(
-    const Value *V1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
-    const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
+    const Value *V1, LocationSize V1Size,
+    const Value *V2, LocationSize V2Size,
     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
-    AliasResult Result =
-        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
-    if (Result != MayAlias)
+    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
+    if (Result != AliasResult::MayAlias)
       return Result;
   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
-    AliasResult Result =
-        aliasGEP(GV2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O2, O1, AAQI);
-    if (Result != MayAlias)
+    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
+    if (Result != AliasResult::MayAlias)
       return Result;
   }
 
   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
-    AliasResult Result =
-        aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, AAQI);
-    if (Result != MayAlias)
+    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
+    if (Result != AliasResult::MayAlias)
       return Result;
   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
-    AliasResult Result =
-        aliasPHI(PN, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, AAQI);
-    if (Result != MayAlias)
+    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
+    if (Result != AliasResult::MayAlias)
       return Result;
   }
 
   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
-    AliasResult Result =
-        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, AAQI);
-    if (Result != MayAlias)
+    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
+    if (Result != AliasResult::MayAlias)
       return Result;
   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
-    AliasResult Result =
-        aliasSelect(S2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, AAQI);
-    if (Result != MayAlias)
+    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
+    if (Result != AliasResult::MayAlias)
       return Result;
   }
 
@@ -1688,10 +1653,10 @@
     if (V1Size.isPrecise() && V2Size.isPrecise() &&
         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
-      return PartialAlias;
+      return AliasResult::PartialAlias;
   }
 
-  return MayAlias;
+  return AliasResult::MayAlias;
 }
 
 /// Check whether two Values can be considered equivalent.
@@ -1720,7 +1685,7 @@
   // the Values cannot come from different iterations of a potential cycle the
   // phi nodes could be involved in.
   for (auto *P : VisitedPhiBBs)
-    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
+    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
       return false;
 
   return true;
@@ -1750,9 +1715,10 @@
 
       // If we found it, subtract off Scale V's from the entry in Dest.  If it
       // goes to zero, remove the entry.
-      if (Dest[j].Scale != Scale)
+      if (Dest[j].Scale != Scale) {
         Dest[j].Scale -= Scale;
-      else
+        Dest[j].IsNSW = false;
+      } else
         Dest.erase(Dest.begin() + j);
       Scale = 0;
       break;
@@ -1760,7 +1726,8 @@
 
     // If we didn't consume this entry, add it to the end of the Dest list.
     if (!!Scale) {
-      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale, Src[i].CxtI};
+      VariableGEPIndex Entry = {V,      ZExtBits,    SExtBits,
+                                -Scale, Src[i].CxtI, Src[i].IsNSW};
       Dest.push_back(Entry);
     }
   }
@@ -1780,28 +1747,20 @@
   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
 
   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
-      Var0.Scale != -Var1.Scale)
+      Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
     return false;
 
-  unsigned Width = Var1.V->getType()->getIntegerBitWidth();
-
   // We'll strip off the Extensions of Var0 and Var1 and do another round
   // of GetLinearExpression decomposition. In the example above, if Var0
   // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
 
-  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
-      V1Offset(Width, 0);
-  bool NSW = true, NUW = true;
-  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
-  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
-                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
-  NSW = true;
-  NUW = true;
-  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
-                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);
-
-  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
-      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
+  LinearExpression E0 =
+      GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
+  LinearExpression E1 =
+      GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
+  if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
+      E0.Val.SExtBits != E1.Val.SExtBits ||
+      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
     return false;
 
   // We have a hit - Var0 and Var1 only differ by a constant offset!
@@ -1811,7 +1770,7 @@
   // minimum difference between the two. The minimum distance may occur due to
   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
   // the minimum distance between %i and %i + 5 is 3.
-  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
+  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
   MinDiff = APIntOps::umin(MinDiff, Wrapped);
   APInt MinDiffBytes =
     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
@@ -1834,9 +1793,8 @@
   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
   auto &AC = AM.getResult<AssumptionAnalysis>(F);
   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
-  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
   auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
-  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, LI, PV);
+  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
 }
 
 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
@@ -1864,13 +1822,11 @@
   auto &ACT = getAnalysis<AssumptionCacheTracker>();
   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
-  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
 
   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                  &DTWP.getDomTree(),
-                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                  PVWP ? &PVWP->getResult() : nullptr));
 
   return false;
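
Throughout this file the bare enumerators are replaced with the scoped
AliasResult:: spelling, and a PartialAlias result may now carry the constant
byte offset between the two locations (see the setOffset and swap calls
above). A toy model of such an offset-carrying result value; the member names
are illustrative, not a copy of the LLVM class:

    #include <cstdint>
    #include <cstdio>

    // Toy offset-carrying alias result: the offset is only meaningful for
    // partial overlaps, and swap() flips it when the operands were reordered.
    class ToyAliasResult {
    public:
      enum Kind : uint8_t { NoAlias, MayAlias, PartialAlias, MustAlias };

      ToyAliasResult(Kind K) : K(K) {}

      bool hasOffset() const { return HasOffset; }
      int32_t getOffset() const { return Offset; }
      void setOffset(int32_t NewOffset) {
        if (K == PartialAlias) { // record the shift between the locations
          HasOffset = true;
          Offset = NewOffset;
        }
      }
      void swap(bool DoSwap = true) {
        if (DoSwap && HasOffset)
          Offset = -Offset;
      }
      operator Kind() const { return K; }

    private:
      Kind K;
      bool HasOffset = false;
      int32_t Offset = 0;
    };

    int main() {
      ToyAliasResult AR(ToyAliasResult::PartialAlias);
      AR.setOffset(-4); // e.g. the right location starts 4 bytes earlier
      AR.swap(true);    // caller asked with operands in the opposite order
      printf("%d %d\n", (int)AR.hasOffset(), AR.getOffset()); // 1 4
    }
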
diff --git a/src/llvm-project/llvm/lib/Analysis/BlockFrequencyInfo.cpp b/src/llvm-project/llvm/lib/Analysis/BlockFrequencyInfo.cpp
index b9b1fde..b464071 100644
--- a/src/llvm-project/llvm/lib/Analysis/BlockFrequencyInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/BlockFrequencyInfo.cpp
@@ -47,6 +47,7 @@
                clEnumValN(GVDT_Count, "count", "display a graph using the real "
                                                "profile count if available.")));
 
+namespace llvm {
 cl::opt<std::string>
     ViewBlockFreqFuncName("view-bfi-func-name", cl::Hidden,
                           cl::desc("The option to specify "
@@ -86,6 +87,7 @@
     "print-bfi-func-name", cl::Hidden,
     cl::desc("The option to specify the name of the function "
              "whose block frequency info is printed."));
+} // namespace llvm
 
 namespace llvm {
 
diff --git a/src/llvm-project/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp b/src/llvm-project/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
index e4fda24..e4e45b3 100644
--- a/src/llvm-project/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/BlockFrequencyInfoImpl.cpp
@@ -40,12 +40,28 @@
 
 #define DEBUG_TYPE "block-freq"
 
+namespace llvm {
 cl::opt<bool> CheckBFIUnknownBlockQueries(
     "check-bfi-unknown-block-queries",
     cl::init(false), cl::Hidden,
     cl::desc("Check if block frequency is queried for an unknown block "
              "for debugging missed BFI updates"));
 
+cl::opt<bool> UseIterativeBFIInference(
+    "use-iterative-bfi-inference", cl::init(false), cl::Hidden, cl::ZeroOrMore,
+    cl::desc("Apply an iterative post-processing to infer correct BFI counts"));
+
+cl::opt<unsigned> IterativeBFIMaxIterationsPerBlock(
+    "iterative-bfi-max-iterations-per-block", cl::init(1000), cl::Hidden,
+    cl::desc("Iterative inference: maximum number of update iterations "
+             "per block"));
+
+cl::opt<double> IterativeBFIPrecision(
+    "iterative-bfi-precision", cl::init(1e-12), cl::Hidden,
+    cl::desc("Iterative inference: delta convergence precision; smaller values "
+             "typically lead to better results at the cost of worsen runtime"));
+} // namespace llvm
+
 ScaledNumber<uint64_t> BlockMass::toScaled() const {
   if (isFull())
     return ScaledNumber<uint64_t>(1, 0);
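
The cl::opt definitions above are moved into namespace llvm so that other
translation units can name them with an ordinary extern declaration. A minimal
sketch of the pattern (the option itself is invented for the example):

    #include "llvm/Support/CommandLine.h"

    // Defining TU: the flag now has a proper llvm:: qualified name.
    namespace llvm {
    cl::opt<bool> EnableToyHeuristic("toy-heuristic", cl::Hidden,
                                     cl::init(false),
                                     cl::desc("Enable the toy heuristic"));
    } // namespace llvm

    // A consuming TU would redeclare it; the redeclaration is also valid here.
    namespace llvm {
    extern cl::opt<bool> EnableToyHeuristic;
    } // namespace llvm

    bool shouldUseToyHeuristic() { return llvm::EnableToyHeuristic; }
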
diff --git a/src/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp b/src/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp
index 884ba48..aa6b93f 100644
--- a/src/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/BranchProbabilityInfo.cpp
@@ -550,8 +550,7 @@
   WorkList.push_back(CmpPHI);
   VisitedInsts.insert(CmpPHI);
   while (!WorkList.empty()) {
-    PHINode *P = WorkList.back();
-    WorkList.pop_back();
+    PHINode *P = WorkList.pop_back_val();
     for (BasicBlock *B : P->blocks()) {
       // Skip blocks that aren't part of the loop
       if (!L->contains(B))
@@ -841,8 +840,7 @@
   SmallVector<uint32_t, 4> SuccWeights;
   uint64_t TotalWeight = 0;
   // Go over all successors of BB and put their weights into SuccWeights.
-  for (const_succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
-    const BasicBlock *SuccBB = *I;
+  for (const BasicBlock *SuccBB : successors(BB)) {
     Optional<uint32_t> Weight;
     const LoopBlock SuccLoopBB = getLoopBlock(SuccBB);
     const LoopEdge Edge{LoopBB, SuccLoopBB};
@@ -1095,10 +1093,8 @@
   // or the function it is currently running over.
   assert(LastF && "Cannot print prior to running over a function");
   for (const auto &BI : *LastF) {
-    for (const_succ_iterator SI = succ_begin(&BI), SE = succ_end(&BI); SI != SE;
-         ++SI) {
-      printEdgeProbability(OS << "  ", &BI, *SI);
-    }
+    for (const BasicBlock *Succ : successors(&BI))
+      printEdgeProbability(OS << "  ", &BI, Succ);
   }
 }
 
@@ -1109,26 +1105,6 @@
   return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
 }
 
-const BasicBlock *
-BranchProbabilityInfo::getHotSucc(const BasicBlock *BB) const {
-  auto MaxProb = BranchProbability::getZero();
-  const BasicBlock *MaxSucc = nullptr;
-
-  for (const auto *Succ : successors(BB)) {
-    auto Prob = getEdgeProbability(BB, Succ);
-    if (Prob > MaxProb) {
-      MaxProb = Prob;
-      MaxSucc = Succ;
-    }
-  }
-
-  // Hot probability is at least 4/5 = 80%
-  if (MaxProb > BranchProbability(4, 5))
-    return MaxSucc;
-
-  return nullptr;
-}
-
 /// Get the raw edge probability for the edge. If can't find it, return a
 /// default probability 1/N where N is the number of successors. Here an edge is
 /// specified using PredBlock and an
@@ -1195,6 +1171,7 @@
   // should be within Probs.size / BranchProbability::getDenominator.
   assert(TotalNumerator <= BranchProbability::getDenominator() + Probs.size());
   assert(TotalNumerator >= BranchProbability::getDenominator() - Probs.size());
+  (void)TotalNumerator;
 }
 
 void BranchProbabilityInfo::copyEdgeProbabilities(BasicBlock *Src,
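
The loop changes above are mechanical modernizations: SmallVector's
pop_back_val() fuses back() and pop_back(), and the successors(BB) range
adaptor replaces explicit succ_begin/succ_end iterator pairs. A sketch
combining both idioms (the helper itself is invented):

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/CFG.h"

    using namespace llvm;

    // Collect every block reachable from Start with a simple worklist walk.
    static void collectReachable(BasicBlock *Start,
                                 SmallPtrSetImpl<BasicBlock *> &Seen) {
      SmallVector<BasicBlock *, 16> WorkList;
      WorkList.push_back(Start);
      Seen.insert(Start);
      while (!WorkList.empty()) {
        BasicBlock *BB = WorkList.pop_back_val(); // back() + pop_back() in one
        for (BasicBlock *Succ : successors(BB))   // range form of succ_begin/end
          if (Seen.insert(Succ).second)           // true only on first insertion
            WorkList.push_back(Succ);
      }
    }
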
diff --git a/src/llvm-project/llvm/lib/Analysis/CFG.cpp b/src/llvm-project/llvm/lib/Analysis/CFG.cpp
index 33602ed..ec25ee1 100644
--- a/src/llvm-project/llvm/lib/Analysis/CFG.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CFG.cpp
@@ -208,16 +208,29 @@
   return false;
 }
 
-bool llvm::isPotentiallyReachable(const BasicBlock *A, const BasicBlock *B,
-                                  const DominatorTree *DT, const LoopInfo *LI) {
+bool llvm::isPotentiallyReachable(
+    const BasicBlock *A, const BasicBlock *B,
+    const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
+    const LoopInfo *LI) {
   assert(A->getParent() == B->getParent() &&
          "This analysis is function-local!");
 
+  if (DT) {
+    if (DT->isReachableFromEntry(A) && !DT->isReachableFromEntry(B))
+      return false;
+    if (!ExclusionSet || ExclusionSet->empty()) {
+      if (A->isEntryBlock() && DT->isReachableFromEntry(B))
+        return true;
+      if (B->isEntryBlock() && DT->isReachableFromEntry(A))
+        return false;
+    }
+  }
+
   SmallVector<BasicBlock*, 32> Worklist;
   Worklist.push_back(const_cast<BasicBlock*>(A));
 
   return isPotentiallyReachableFromMany(Worklist, const_cast<BasicBlock *>(B),
-                                        nullptr, DT, LI);
+                                        ExclusionSet, DT, LI);
 }
 
 bool llvm::isPotentiallyReachable(
@@ -227,8 +240,6 @@
   assert(A->getParent()->getParent() == B->getParent()->getParent() &&
          "This analysis is function-local!");
 
-  SmallVector<BasicBlock*, 32> Worklist;
-
   if (A->getParent() == B->getParent()) {
     // The same block case is special because it's the only time we're looking
     // within a single block to see which instruction comes first. Once we
@@ -242,43 +253,28 @@
     if (LI && LI->getLoopFor(BB) != nullptr)
       return true;
 
-    // Linear scan, start at 'A', see whether we hit 'B' or the end first.
-    for (BasicBlock::const_iterator I = A->getIterator(), E = BB->end(); I != E;
-         ++I) {
-      if (&*I == B)
-        return true;
-    }
+    // If A comes before B, then B is definitively reachable from A.
+    if (A == B || A->comesBefore(B))
+      return true;
 
     // Can't be in a loop if it's the entry block -- the entry block may not
     // have predecessors.
-    if (BB == &BB->getParent()->getEntryBlock())
+    if (BB->isEntryBlock())
       return false;
 
     // Otherwise, continue doing the normal per-BB CFG walk.
+    SmallVector<BasicBlock*, 32> Worklist;
     Worklist.append(succ_begin(BB), succ_end(BB));
-
     if (Worklist.empty()) {
       // We've proven that there's no path!
       return false;
     }
-  } else {
-    Worklist.push_back(const_cast<BasicBlock*>(A->getParent()));
+
+    return isPotentiallyReachableFromMany(
+        Worklist, const_cast<BasicBlock *>(B->getParent()), ExclusionSet,
+        DT, LI);
   }
 
-  if (DT) {
-    if (DT->isReachableFromEntry(A->getParent()) &&
-        !DT->isReachableFromEntry(B->getParent()))
-      return false;
-    if (!ExclusionSet || ExclusionSet->empty()) {
-      if (A->getParent() == &A->getParent()->getParent()->getEntryBlock() &&
-          DT->isReachableFromEntry(B->getParent()))
-        return true;
-      if (B->getParent() == &A->getParent()->getParent()->getEntryBlock() &&
-          DT->isReachableFromEntry(A->getParent()))
-        return false;
-    }
-  }
-
-  return isPotentiallyReachableFromMany(
-      Worklist, const_cast<BasicBlock *>(B->getParent()), ExclusionSet, DT, LI);
+  return isPotentiallyReachable(
+      A->getParent(), B->getParent(), ExclusionSet, DT, LI);
 }
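
After this change the instruction-level overload simply defers to the
block-level one, which accepts the exclusion set directly. A hedged usage
sketch (assuming the LLVM 13 signature shown in the hunk; the helper is
invented):

    #include "llvm/ADT/SmallPtrSet.h"
    #include "llvm/Analysis/CFG.h"
    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Dominators.h"

    using namespace llvm;

    // Is To reachable from From without passing through Blocker? The walk
    // treats every block in the exclusion set as if it were cut from the CFG.
    static bool reachableAvoiding(const BasicBlock *From, const BasicBlock *To,
                                  BasicBlock *Blocker,
                                  const DominatorTree *DT) {
      SmallPtrSet<BasicBlock *, 4> ExclusionSet;
      ExclusionSet.insert(Blocker);
      return isPotentiallyReachable(From, To, &ExclusionSet, DT,
                                    /*LI=*/nullptr);
    }
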
diff --git a/src/llvm-project/llvm/lib/Analysis/CFGPrinter.cpp b/src/llvm-project/llvm/lib/Analysis/CFGPrinter.cpp
index 33b5a46..04ccdc5 100644
--- a/src/llvm-project/llvm/lib/Analysis/CFGPrinter.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CFGPrinter.cpp
@@ -42,6 +42,10 @@
 static cl::opt<bool> HideDeoptimizePaths("cfg-hide-deoptimize-paths",
                                          cl::init(false));
 
+static cl::opt<double> HideColdPaths(
+    "cfg-hide-cold-paths", cl::init(0.0),
+    cl::desc("Hide blocks with relative frequency below the given value"));
+
 static cl::opt<bool> ShowHeatColors("cfg-heat-colors", cl::init(true),
                                     cl::Hidden,
                                     cl::desc("Show heat colors in CFG"));
@@ -63,7 +67,7 @@
   errs() << "Writing '" << Filename << "'...";
 
   std::error_code EC;
-  raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
+  raw_fd_ostream File(Filename, EC, sys::fs::OF_Text);
 
   DOTFuncInfo CFGInfo(&F, BFI, BPI, MaxFreq);
   CFGInfo.setHeatColors(ShowHeatColors);
@@ -96,6 +100,8 @@
   }
 
   bool runOnFunction(Function &F) override {
+    if (!CFGFuncName.empty() && !F.getName().contains(CFGFuncName))
+      return false;
     auto *BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
     viewCFG(F, BFI, BPI, getMaxFreq(F, BFI));
@@ -111,13 +117,15 @@
     AU.setPreservesAll();
   }
 };
-}
+} // namespace
 
 char CFGViewerLegacyPass::ID = 0;
 INITIALIZE_PASS(CFGViewerLegacyPass, "view-cfg", "View CFG of function", false,
                 true)
 
 PreservedAnalyses CFGViewerPass::run(Function &F, FunctionAnalysisManager &AM) {
+  if (!CFGFuncName.empty() && !F.getName().contains(CFGFuncName))
+    return PreservedAnalyses::all();
   auto *BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
   auto *BPI = &AM.getResult<BranchProbabilityAnalysis>(F);
   viewCFG(F, BFI, BPI, getMaxFreq(F, BFI));
@@ -132,6 +140,8 @@
   }
 
   bool runOnFunction(Function &F) override {
+    if (!CFGFuncName.empty() && !F.getName().contains(CFGFuncName))
+      return false;
     auto *BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
     viewCFG(F, BFI, BPI, getMaxFreq(F, BFI), /*CFGOnly=*/true);
@@ -147,7 +157,7 @@
     AU.setPreservesAll();
   }
 };
-}
+} // namespace
 
 char CFGOnlyViewerLegacyPass::ID = 0;
 INITIALIZE_PASS(CFGOnlyViewerLegacyPass, "view-cfg-only",
@@ -155,6 +165,8 @@
 
 PreservedAnalyses CFGOnlyViewerPass::run(Function &F,
                                          FunctionAnalysisManager &AM) {
+  if (!CFGFuncName.empty() && !F.getName().contains(CFGFuncName))
+    return PreservedAnalyses::all();
   auto *BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
   auto *BPI = &AM.getResult<BranchProbabilityAnalysis>(F);
   viewCFG(F, BFI, BPI, getMaxFreq(F, BFI), /*CFGOnly=*/true);
@@ -169,6 +181,8 @@
   }
 
   bool runOnFunction(Function &F) override {
+    if (!CFGFuncName.empty() && !F.getName().contains(CFGFuncName))
+      return false;
     auto *BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
     writeCFGToDotFile(F, BFI, BPI, getMaxFreq(F, BFI));
@@ -184,7 +198,7 @@
     AU.setPreservesAll();
   }
 };
-}
+} // namespace
 
 char CFGPrinterLegacyPass::ID = 0;
 INITIALIZE_PASS(CFGPrinterLegacyPass, "dot-cfg",
@@ -192,6 +206,8 @@
 
 PreservedAnalyses CFGPrinterPass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
+  if (!CFGFuncName.empty() && !F.getName().contains(CFGFuncName))
+    return PreservedAnalyses::all();
   auto *BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
   auto *BPI = &AM.getResult<BranchProbabilityAnalysis>(F);
   writeCFGToDotFile(F, BFI, BPI, getMaxFreq(F, BFI));
@@ -206,6 +222,8 @@
   }
 
   bool runOnFunction(Function &F) override {
+    if (!CFGFuncName.empty() && !F.getName().contains(CFGFuncName))
+      return false;
     auto *BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
     auto *BFI = &getAnalysis<BlockFrequencyInfoWrapperPass>().getBFI();
     writeCFGToDotFile(F, BFI, BPI, getMaxFreq(F, BFI), /*CFGOnly=*/true);
@@ -220,7 +238,7 @@
     AU.setPreservesAll();
   }
 };
-}
+} // namespace
 
 char CFGOnlyPrinterLegacyPass::ID = 0;
 INITIALIZE_PASS(CFGOnlyPrinterLegacyPass, "dot-cfg-only",
@@ -229,6 +247,8 @@
 
 PreservedAnalyses CFGOnlyPrinterPass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
+  if (!CFGFuncName.empty() && !F.getName().contains(CFGFuncName))
+    return PreservedAnalyses::all();
   auto *BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
   auto *BPI = &AM.getResult<BranchProbabilityAnalysis>(F);
   writeCFGToDotFile(F, BFI, BPI, getMaxFreq(F, BFI), /*CFGOnly=*/true);
@@ -270,32 +290,45 @@
   return new CFGOnlyPrinterLegacyPass();
 }
 
-void DOTGraphTraits<DOTFuncInfo *>::computeHiddenNodes(const Function *F) {
+/// Find all blocks on the paths which terminate with a deoptimize or 
+/// unreachable (i.e. all blocks which are post-dominated by a deoptimize 
+/// or unreachable). These paths are hidden if the corresponding cl::opts
+/// are enabled.
+void DOTGraphTraits<DOTFuncInfo *>::computeDeoptOrUnreachablePaths(
+    const Function *F) {
   auto evaluateBB = [&](const BasicBlock *Node) {
     if (succ_empty(Node)) {
       const Instruction *TI = Node->getTerminator();
-      isHiddenBasicBlock[Node] =
+      isOnDeoptOrUnreachablePath[Node] =
           (HideUnreachablePaths && isa<UnreachableInst>(TI)) ||
           (HideDeoptimizePaths && Node->getTerminatingDeoptimizeCall());
       return;
     }
-    isHiddenBasicBlock[Node] =
+    isOnDeoptOrUnreachablePath[Node] =
         llvm::all_of(successors(Node), [this](const BasicBlock *BB) {
-          return isHiddenBasicBlock[BB];
+          return isOnDeoptOrUnreachablePath[BB];
         });
   };
   /// The post order traversal iteration is done to know the status of
-  /// isHiddenBasicBlock for all the successors on the current BB.
-  for_each(po_begin(&F->getEntryBlock()), po_end(&F->getEntryBlock()),
-           evaluateBB);
+  /// isOnDeoptOrUnreachablePath for all the successors on the current BB.
+  llvm::for_each(post_order(&F->getEntryBlock()), evaluateBB);
 }
 
 bool DOTGraphTraits<DOTFuncInfo *>::isNodeHidden(const BasicBlock *Node,
                                                  const DOTFuncInfo *CFGInfo) {
-  // If both restricting flags are false, all nodes are displayed.
-  if (!HideUnreachablePaths && !HideDeoptimizePaths)
-    return false;
-  if (isHiddenBasicBlock.find(Node) == isHiddenBasicBlock.end())
-    computeHiddenNodes(Node->getParent());
-  return isHiddenBasicBlock[Node];
+  if (HideColdPaths.getNumOccurrences() > 0)
+    if (auto *BFI = CFGInfo->getBFI()) {
+      uint64_t NodeFreq = BFI->getBlockFreq(Node).getFrequency();
+      uint64_t EntryFreq = BFI->getEntryFreq();
+      // Hide blocks with relative frequency below the HideColdPaths threshold.
+      if ((double)NodeFreq / EntryFreq < HideColdPaths)
+        return true;
+    }
+  if (HideUnreachablePaths || HideDeoptimizePaths) {
+    if (isOnDeoptOrUnreachablePath.find(Node) == 
+        isOnDeoptOrUnreachablePath.end())
+      computeDeoptOrUnreachablePaths(Node->getParent());
+    return isOnDeoptOrUnreachablePath[Node];
+  }
+  return false;
 }
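
The new -cfg-hide-cold-paths=<ratio> option hides any block whose frequency,
relative to the function entry, falls below the given threshold. The predicate
in isolation (helper name invented):

    #include "llvm/Analysis/BlockFrequencyInfo.h"
    #include "llvm/IR/BasicBlock.h"
    #include <cstdint>

    using namespace llvm;

    // A block is "cold" for rendering when its relative frequency is below
    // Threshold; this is the same ratio test the printer applies above.
    static bool isColdForRendering(const BasicBlock *BB,
                                   const BlockFrequencyInfo &BFI,
                                   double Threshold) {
      uint64_t NodeFreq = BFI.getBlockFreq(BB).getFrequency();
      uint64_t EntryFreq = BFI.getEntryFreq();
      return (double)NodeFreq / EntryFreq < Threshold;
    }
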
diff --git a/src/llvm-project/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
index 2be23a5..1216d03 100644
--- a/src/llvm-project/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CFLAndersAliasAnalysis.cpp
@@ -850,7 +850,7 @@
   auto *ValB = LocB.Ptr;
 
   if (!ValA->getType()->isPointerTy() || !ValB->getType()->isPointerTy())
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   auto *Fn = parentFunctionOfValue(ValA);
   if (!Fn) {
@@ -861,7 +861,7 @@
       LLVM_DEBUG(
           dbgs()
           << "CFLAndersAA: could not extract parent function information.\n");
-      return MayAlias;
+      return AliasResult::MayAlias;
     }
   } else {
     assert(!parentFunctionOfValue(ValB) || parentFunctionOfValue(ValB) == Fn);
@@ -872,15 +872,15 @@
 
   // AliasMap lookup
   if (FunInfo->mayAlias(ValA, LocA.Size, ValB, LocB.Size))
-    return MayAlias;
-  return NoAlias;
+    return AliasResult::MayAlias;
+  return AliasResult::NoAlias;
 }
 
 AliasResult CFLAndersAAResult::alias(const MemoryLocation &LocA,
                                      const MemoryLocation &LocB,
                                      AAQueryInfo &AAQI) {
   if (LocA.Ptr == LocB.Ptr)
-    return MustAlias;
+    return AliasResult::MustAlias;
 
   // Comparisons between global variables and other constants should be
   // handled by BasicAA.
@@ -891,7 +891,7 @@
     return AAResultBase::alias(LocA, LocB, AAQI);
 
   AliasResult QueryResult = query(LocA, LocB);
-  if (QueryResult == MayAlias)
+  if (QueryResult == AliasResult::MayAlias)
     return AAResultBase::alias(LocA, LocB, AAQI);
 
   return QueryResult;
diff --git a/src/llvm-project/llvm/lib/Analysis/CFLGraph.h b/src/llvm-project/llvm/lib/Analysis/CFLGraph.h
index 21842ed..02a13d6 100644
--- a/src/llvm-project/llvm/lib/Analysis/CFLGraph.h
+++ b/src/llvm-project/llvm/lib/Analysis/CFLGraph.h
@@ -284,6 +284,13 @@
       addAssignEdge(Src, &Inst);
     }
 
+    void visitFreezeInst(FreezeInst &Inst) {
+      // Accessing freeze(ptr) is equivalent to accessing ptr.
+      // The former raises UB iff latter raises UB.
+      auto *Src = Inst.getOperand(0);
+      addAssignEdge(Src, &Inst);
+    }
+
     void visitBinaryOperator(BinaryOperator &Inst) {
       auto *Op1 = Inst.getOperand(0);
       auto *Op2 = Inst.getOperand(1);
diff --git a/src/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
index 85a8c3d..9467bb3 100644
--- a/src/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CFLSteensAliasAnalysis.cpp
@@ -269,7 +269,7 @@
   auto *ValB = const_cast<Value *>(LocB.Ptr);
 
   if (!ValA->getType()->isPointerTy() || !ValB->getType()->isPointerTy())
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   Function *Fn = nullptr;
   Function *MaybeFnA = const_cast<Function *>(parentFunctionOfValue(ValA));
@@ -280,7 +280,7 @@
     LLVM_DEBUG(
         dbgs()
         << "CFLSteensAA: could not extract parent function information.\n");
-    return MayAlias;
+    return AliasResult::MayAlias;
   }
 
   if (MaybeFnA) {
@@ -298,11 +298,11 @@
   auto &Sets = MaybeInfo->getStratifiedSets();
   auto MaybeA = Sets.find(InstantiatedValue{ValA, 0});
   if (!MaybeA.hasValue())
-    return MayAlias;
+    return AliasResult::MayAlias;
 
   auto MaybeB = Sets.find(InstantiatedValue{ValB, 0});
   if (!MaybeB.hasValue())
-    return MayAlias;
+    return AliasResult::MayAlias;
 
   auto SetA = *MaybeA;
   auto SetB = *MaybeB;
@@ -320,14 +320,14 @@
   // - AttrEscaped do not alias globals/arguments, but they may alias
   // AttrUnknown values
   if (SetA.Index == SetB.Index)
-    return MayAlias;
+    return AliasResult::MayAlias;
   if (AttrsA.none() || AttrsB.none())
-    return NoAlias;
+    return AliasResult::NoAlias;
   if (hasUnknownOrCallerAttr(AttrsA) || hasUnknownOrCallerAttr(AttrsB))
-    return MayAlias;
+    return AliasResult::MayAlias;
   if (isGlobalOrArgAttr(AttrsA) && isGlobalOrArgAttr(AttrsB))
-    return MayAlias;
-  return NoAlias;
+    return AliasResult::MayAlias;
+  return AliasResult::NoAlias;
 }
 
 AnalysisKey CFLSteensAA::Key;
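
The decision ladder above is unchanged apart from the scoped enumerators. As a
distilled restatement (bit positions and names invented): two values may alias
when they landed in the same stratified set, can be disambiguated when either
never escapes, and otherwise may alias only when their escape attributes make
both externally visible.

    #include <bitset>

    enum AttrBit { AttrUnknownBit, AttrCallerBit, AttrGlobalBit, AttrArgBit };
    using Attrs = std::bitset<4>;

    static bool hasUnknownOrCaller(Attrs A) {
      return A.test(AttrUnknownBit) || A.test(AttrCallerBit);
    }
    static bool isGlobalOrArg(Attrs A) {
      return A.test(AttrGlobalBit) || A.test(AttrArgBit);
    }

    // Mirror of the ladder above: same set => may alias; a value with no
    // escape attributes is provably local => no alias; unknown/caller escape
    // defeats the analysis; two global/argument escapees may be the same.
    static bool mayAlias(unsigned SetA, Attrs AttrsA,
                         unsigned SetB, Attrs AttrsB) {
      if (SetA == SetB) return true;
      if (AttrsA.none() || AttrsB.none()) return false;
      if (hasUnknownOrCaller(AttrsA) || hasUnknownOrCaller(AttrsB)) return true;
      return isGlobalOrArg(AttrsA) && isGlobalOrArg(AttrsB);
    }
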
diff --git a/src/llvm-project/llvm/lib/Analysis/CGSCCPassManager.cpp b/src/llvm-project/llvm/lib/Analysis/CGSCCPassManager.cpp
index 3230e90..253cc0b 100644
--- a/src/llvm-project/llvm/lib/Analysis/CGSCCPassManager.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CGSCCPassManager.cpp
@@ -69,9 +69,6 @@
 
   PreservedAnalyses PA = PreservedAnalyses::all();
 
-  if (DebugLogging)
-    dbgs() << "Starting CGSCC pass manager run.\n";
-
   // The SCC may be refined while we are running passes over it, so set up
   // a pointer that we can update.
   LazyCallGraph::SCC *C = &InitialC;
@@ -142,9 +139,6 @@
   // one individually.
   PA.preserveSet<AllAnalysesOn<LazyCallGraph::SCC>>();
 
-  if (DebugLogging)
-    dbgs() << "Finished CGSCC pass manager run.\n";
-
   return PA;
 }
 
@@ -438,8 +432,13 @@
       break;
     }
 
-    // Check that we didn't miss any update scenario.
-    assert(!UR.InvalidatedSCCs.count(C) && "Processing an invalid SCC!");
+    // If the CGSCC pass wasn't able to provide a valid updated SCC, the
+    // current SCC may simply need to be skipped.
+    if (UR.InvalidatedSCCs.count(C)) {
+      LLVM_DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
+      break;
+    }
+
     assert(C->begin() != C->end() && "Cannot have an empty SCC!");
 
     // Check whether any of the handles were devirtualized.
@@ -720,7 +719,7 @@
   auto PAC = PA.getChecker<FunctionAnalysisManagerCGSCCProxy>();
   if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<LazyCallGraph::SCC>>()) {
     for (LazyCallGraph::Node &N : C)
-      FAM->clear(N.getFunction(), N.getFunction().getName());
+      FAM->invalidate(N.getFunction(), PA);
 
     return false;
   }
@@ -872,8 +871,7 @@
   if (FAM)
     updateNewSCCFunctionAnalyses(*C, G, AM, *FAM);
 
-  for (SCC &NewC : llvm::reverse(make_range(std::next(NewSCCRange.begin()),
-                                            NewSCCRange.end()))) {
+  for (SCC &NewC : llvm::reverse(llvm::drop_begin(NewSCCRange))) {
     assert(C != &NewC && "No need to re-visit the current SCC!");
     assert(OldC != &NewC && "Already handled the original SCC!");
     UR.CWorklist.insert(&NewC);
@@ -981,8 +979,10 @@
     RefSCC &TargetRC = TargetC.getOuterRefSCC();
     (void)TargetRC;
     // TODO: This only allows trivial edges to be added for now.
+#ifdef EXPENSIVE_CHECKS
     assert((RC == &TargetRC ||
            RC->isAncestorOf(TargetRC)) && "New ref edge is not trivial!");
+#endif
     RC->insertTrivialRefEdge(N, *RefTarget);
   }
 
@@ -992,8 +992,10 @@
     RefSCC &TargetRC = TargetC.getOuterRefSCC();
     (void)TargetRC;
     // TODO: This only allows trivial edges to be added for now.
+#ifdef EXPENSIVE_CHECKS
     assert((RC == &TargetRC ||
            RC->isAncestorOf(TargetRC)) && "New call edge is not trivial!");
+#endif
     // Add a trivial ref edge to be promoted later on alongside
     // PromotedRefTargets.
     RC->insertTrivialRefEdge(N, *CallTarget);
@@ -1040,9 +1042,9 @@
     if (&TargetRC == RC)
       return false;
 
-    RC->removeOutgoingEdge(N, *TargetN);
     LLVM_DEBUG(dbgs() << "Deleting outgoing edge from '" << N << "' to '"
-                      << TargetN << "'\n");
+                      << *TargetN << "'\n");
+    RC->removeOutgoingEdge(N, *TargetN);
     return true;
   });
 
@@ -1067,8 +1069,7 @@
     // "bottom" we will continue processing in the bottom-up walk.
     assert(NewRefSCCs.front() == RC &&
            "New current RefSCC not first in the returned list!");
-    for (RefSCC *NewRC : llvm::reverse(make_range(std::next(NewRefSCCs.begin()),
-                                                  NewRefSCCs.end()))) {
+    for (RefSCC *NewRC : llvm::reverse(llvm::drop_begin(NewRefSCCs))) {
       assert(NewRC != RC && "Should not encounter the current RefSCC further "
                             "in the postorder list of new RefSCCs.");
       UR.RCWorklist.insert(NewRC);
@@ -1087,8 +1088,10 @@
     // The easy case is when the target RefSCC is not this RefSCC. This is
     // only supported when the target RefSCC is a child of this RefSCC.
     if (&TargetRC != RC) {
+#ifdef EXPENSIVE_CHECKS
       assert(RC->isAncestorOf(TargetRC) &&
              "Cannot potentially form RefSCC cycles here!");
+#endif
       RC->switchOutgoingEdgeToRef(N, *RefTarget);
       LLVM_DEBUG(dbgs() << "Switch outgoing call edge to a ref edge from '" << N
                         << "' to '" << *RefTarget << "'\n");
@@ -1121,8 +1124,10 @@
     // The easy case is when the target RefSCC is not this RefSCC. This is
     // only supported when the target RefSCC is a child of this RefSCC.
     if (&TargetRC != RC) {
+#ifdef EXPENSIVE_CHECKS
       assert(RC->isAncestorOf(TargetRC) &&
              "Cannot potentially form RefSCC cycles here!");
+#endif
       RC->switchOutgoingEdgeToCall(N, *CallTarget);
       LLVM_DEBUG(dbgs() << "Switch outgoing ref edge to a call edge from '" << N
                         << "' to '" << *CallTarget << "'\n");
diff --git a/src/llvm-project/llvm/lib/Analysis/CMakeLists.txt b/src/llvm-project/llvm/lib/Analysis/CMakeLists.txt
index f31cf34..e0cd6b3 100644
--- a/src/llvm-project/llvm/lib/Analysis/CMakeLists.txt
+++ b/src/llvm-project/llvm/lib/Analysis/CMakeLists.txt
@@ -1,15 +1,33 @@
 if (DEFINED LLVM_HAVE_TF_AOT OR DEFINED LLVM_HAVE_TF_API)
+  include(TensorFlowCompile)
+  set(LLVM_INLINER_MODEL_PATH_DEFAULT "models/inliner-Oz")
+
+  # This url points to the most recent model which is known to be compatible with
+  # LLVM. When better models are published, this url should be updated to aid
+  # discoverability.
+  set(LLVM_INLINER_MODEL_CURRENT_URL "https://github.com/google/ml-compiler-opt/releases/download/inlining-Oz-v1.0/inlining-Oz-99f0063-v1.0.tar.gz")
+
   if (DEFINED LLVM_HAVE_TF_AOT)
-    include(TensorFlowCompile)
-    tfcompile(models/inliner serve action InlinerSizeModel llvm::InlinerSizeModel)
-    list(APPEND GeneratedMLSources
-      $<TARGET_OBJECTS:tf_xla_runtime_objects>
-      ${GENERATED_OBJS}
+    # If the path is empty, autogenerate the model
+    if (NOT DEFINED LLVM_INLINER_MODEL_PATH OR "${LLVM_INLINER_MODEL_PATH}" STREQUAL "")
+      set(LLVM_INLINER_MODEL_PATH "autogenerate")
+      message(WARNING "LLVM_INLINER_MODEL_PATH was not set: autogenerating a model to finish the build.")
+    endif()
+
+    tf_find_and_compile(
+      ${LLVM_INLINER_MODEL_PATH}
+      ${LLVM_INLINER_MODEL_CURRENT_URL}
+      ${LLVM_INLINER_MODEL_PATH_DEFAULT}
+      "models/inlining/config.py"
+      serve
+      action
+      InlinerSizeModel
+      llvm::InlinerSizeModel
     )
   endif()
 
   if (DEFINED LLVM_HAVE_TF_API)
-    LIST(APPEND MLLinkDeps ${tensorflow_c_api})
+    list(APPEND MLLinkDeps ${tensorflow_c_api} ${tensorflow_fx})
   endif()
 endif()
 
@@ -97,6 +115,7 @@
   ObjCARCAnalysisUtils.cpp
   ObjCARCInstKind.cpp
   OptimizationRemarkEmitter.cpp
+  OverflowInstAnalysis.cpp
   PHITransAddr.cpp
   PhiValues.cpp
   PostDominators.cpp
@@ -134,6 +153,7 @@
 
   DEPENDS
   intrinsics_gen
+  ${MLDeps}
 
   LINK_LIBS
   ${MLLinkDeps}
diff --git a/src/llvm-project/llvm/lib/Analysis/CallGraph.cpp b/src/llvm-project/llvm/lib/Analysis/CallGraph.cpp
index 9b212e5..dfbd29b 100644
--- a/src/llvm-project/llvm/lib/Analysis/CallGraph.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CallGraph.cpp
@@ -80,7 +80,9 @@
   // If this function has external linkage or has its address taken and
   // it is not a callback, then anything could call it.
   if (!F->hasLocalLinkage() ||
-      F->hasAddressTaken(nullptr, /*IgnoreCallbackUses=*/true))
+      F->hasAddressTaken(nullptr, /*IgnoreCallbackUses=*/true,
+                         /* IgnoreAssumeLikeCalls */ true,
+                         /* IgnoreLLVMUsed */ false))
     ExternalCallingNode->addCalledFunction(nullptr, Node);
 
   populateCallGraphNode(Node);
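
As a hedged illustration of the tightened check above, here is the same test factored into a hypothetical helper; the parameter roles follow the Function::hasAddressTaken overload used in the hunk, so address uses in assume-like calls no longer count, while an appearance in llvm.used still does.

    #include "llvm/IR/Function.h"

    // Hypothetical helper: mirrors the call graph's externally-callable test.
    static bool isExternallyCallable(const llvm::Function &F) {
      return !F.hasLocalLinkage() ||
             F.hasAddressTaken(/*PutOffender=*/nullptr,
                               /*IgnoreCallbackUses=*/true,
                               /*IgnoreAssumeLikeCalls=*/true,
                               /*IgnoreLLVMUsed=*/false);
    }
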
diff --git a/src/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp b/src/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp
index 38057d4..f2e5eab 100644
--- a/src/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CallGraphSCCPass.cpp
@@ -43,8 +43,10 @@
 
 #define DEBUG_TYPE "cgscc-passmgr"
 
+namespace llvm {
 cl::opt<unsigned> MaxDevirtIterations("max-devirt-iterations", cl::ReallyHidden,
                                       cl::init(4));
+}
 
 STATISTIC(MaxSCCIterations, "Maximum CGSCCPassMgr iterations on one SCC");
 
@@ -454,10 +456,10 @@
       std::string Functions;
   #ifndef NDEBUG
       raw_string_ostream OS(Functions);
-      for (CallGraphSCC::iterator I = CurSCC.begin(), E = CurSCC.end();
-           I != E; ++I) {
-        if (I != CurSCC.begin()) OS << ", ";
-        (*I)->print(OS);
+      ListSeparator LS;
+      for (const CallGraphNode *CGN : CurSCC) {
+        OS << LS;
+        CGN->print(OS);
       }
       OS.flush();
   #endif
@@ -734,12 +736,9 @@
 
 static std::string getDescription(const CallGraphSCC &SCC) {
   std::string Desc = "SCC (";
-  bool First = true;
+  ListSeparator LS;
   for (CallGraphNode *CGN : SCC) {
-    if (First)
-      First = false;
-    else
-      Desc += ", ";
+    Desc += LS;
     Function *F = CGN->getFunction();
     if (F)
       Desc += F->getName();
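
Both rewrites above lean on llvm::ListSeparator from llvm/ADT/StringExtras.h: the first stream insertion yields an empty string and every later one yields the separator, so the manual first-element flag disappears. A minimal sketch (demoListSeparator is hypothetical):

    #include "llvm/ADT/StringExtras.h"
    #include "llvm/Support/raw_ostream.h"

    void demoListSeparator(llvm::raw_ostream &OS) {
      llvm::ListSeparator LS; // separator defaults to ", "
      for (const char *Name : {"f", "g", "h"})
        OS << LS << Name;     // prints "f, g, h"
    }
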
diff --git a/src/llvm-project/llvm/lib/Analysis/CallPrinter.cpp b/src/llvm-project/llvm/lib/Analysis/CallPrinter.cpp
index 872a91a..829532a 100644
--- a/src/llvm-project/llvm/lib/Analysis/CallPrinter.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CallPrinter.cpp
@@ -62,17 +62,17 @@
       : M(M), CG(CG), LookupBFI(LookupBFI) {
     MaxFreq = 0;
 
-    for (auto F = M->getFunctionList().begin(); F != M->getFunctionList().end(); ++F) {
+    for (Function &F : M->getFunctionList()) {
       uint64_t localSumFreq = 0;
       SmallSet<Function *, 16> Callers;
-      for (User *U : (*F).users())
+      for (User *U : F.users())
         if (isa<CallInst>(U))
           Callers.insert(cast<Instruction>(U)->getFunction());
-      for (auto iter = Callers.begin() ; iter != Callers.end() ; ++iter)
-        localSumFreq += getNumOfCalls((**iter), *F);
+      for (Function *Caller : Callers)
+        localSumFreq += getNumOfCalls(*Caller, F);
       if (localSumFreq >= MaxFreq)
         MaxFreq = localSumFreq;
-      Freq[&*F] = localSumFreq;
+      Freq[&F] = localSumFreq;
     }
     if (!CallMultiGraph)
       removeParallelEdges();
@@ -274,7 +274,7 @@
   errs() << "Writing '" << Filename << "'...";
 
   std::error_code EC;
-  raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
+  raw_fd_ostream File(Filename, EC, sys::fs::OF_Text);
 
   CallGraph CG(M);
   CallGraphDOTInfo CFGInfo(&M, &CG, LookupBFI);
diff --git a/src/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp b/src/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp
index b2fc6e6..5fe4f9b 100644
--- a/src/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CaptureTracking.cpp
@@ -68,8 +68,8 @@
   if (auto *GEP = dyn_cast<GetElementPtrInst>(O))
     if (GEP->isInBounds())
       return true;
-  bool CanBeNull;
-  return O->getPointerDereferenceableBytes(DL, CanBeNull);
+  bool CanBeNull, CanBeFreed;
+  return O->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
 }
 
 namespace {
@@ -106,65 +106,27 @@
     void tooManyUses() override { Captured = true; }
 
     bool isSafeToPrune(Instruction *I) {
-      BasicBlock *BB = I->getParent();
+      if (BeforeHere == I)
+        return !IncludeI;
+
       // We explore this usage only if the usage can reach "BeforeHere".
       // If use is not reachable from entry, there is no need to explore.
-      if (BeforeHere != I && !DT->isReachableFromEntry(BB))
+      if (!DT->isReachableFromEntry(I->getParent()))
         return true;
 
-      // Compute the case where both instructions are inside the same basic
-      // block.
-      if (BB == BeforeHere->getParent()) {
-        // 'I' dominates 'BeforeHere' => not safe to prune.
-        //
-        // The value defined by an invoke dominates an instruction only
-        // if it dominates every instruction in UseBB. A PHI is dominated only
-        // if the instruction dominates every possible use in the UseBB. Since
-        // UseBB == BB, avoid pruning.
-        if (isa<InvokeInst>(BeforeHere) || isa<PHINode>(I) || I == BeforeHere)
-          return false;
-        if (!BeforeHere->comesBefore(I))
-          return false;
-
-        // 'BeforeHere' comes before 'I', it's safe to prune if we also
-        // guarantee that 'I' never reaches 'BeforeHere' through a back-edge or
-        // by its successors, i.e, prune if:
-        //
-        //  (1) BB is an entry block or have no successors.
-        //  (2) There's no path coming back through BB successors.
-        if (BB == &BB->getParent()->getEntryBlock() ||
-            !BB->getTerminator()->getNumSuccessors())
-          return true;
-
-        SmallVector<BasicBlock*, 32> Worklist;
-        Worklist.append(succ_begin(BB), succ_end(BB));
-        return !isPotentiallyReachableFromMany(Worklist, BB, nullptr, DT);
-      }
-
-      // If the value is defined in the same basic block as use and BeforeHere,
-      // there is no need to explore the use if BeforeHere dominates use.
       // Check whether there is a path from I to BeforeHere.
-      if (BeforeHere != I && DT->dominates(BeforeHere, I) &&
-          !isPotentiallyReachable(I, BeforeHere, nullptr, DT))
-        return true;
-
-      return false;
-    }
-
-    bool shouldExplore(const Use *U) override {
-      Instruction *I = cast<Instruction>(U->getUser());
-
-      if (BeforeHere == I && !IncludeI)
-        return false;
-
-      if (isSafeToPrune(I))
-        return false;
-
-      return true;
+      return !isPotentiallyReachable(I, BeforeHere, nullptr, DT);
     }
 
     bool captured(const Use *U) override {
-      if (isa<ReturnInst>(U->getUser()) && !ReturnCaptures)
+      Instruction *I = cast<Instruction>(U->getUser());
+      if (isa<ReturnInst>(I) && !ReturnCaptures)
+        return false;
+
+      // Check isSafeToPrune() here rather than in shouldExplore() to avoid
+      // an expensive reachability query for every instruction we look at.
+      // Instead we only do one for actual capturing candidates.
+      if (isSafeToPrune(I))
         return false;
 
       Captured = true;
@@ -423,8 +385,8 @@
       return CacheIt->second;
   }
 
-  // If this is a local allocation, check to see if it escapes.
-  if (isa<AllocaInst>(V) || isNoAliasCall(V)) {
+  // If this is an identified function-local object, check to see if it escapes.
+  if (isIdentifiedFunctionLocal(V)) {
     // Set StoreCaptures to True so that we can assume in our callers that the
     // pointer is not the result of a load instruction. Currently
     // PointerMayBeCaptured doesn't have any special analysis for the
@@ -436,19 +398,5 @@
     return Ret;
   }
 
-  // If this is an argument that corresponds to a byval or noalias argument,
-  // then it has not escaped before entering the function.  Check if it escapes
-  // inside the function.
-  if (const Argument *A = dyn_cast<Argument>(V))
-    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
-      // Note even if the argument is marked nocapture, we still need to check
-      // for copies made inside the function. The nocapture attribute only
-      // specifies that there are no copies made that outlive the function.
-      auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
-      if (IsCapturedCache)
-        CacheIt->second = Ret;
-      return Ret;
-    }
-
   return false;
 }
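
A sketch of the consolidated entry point, under the assumption (stated by the hunk) that isIdentifiedFunctionLocal from llvm/Analysis/AliasAnalysis.h already covers allocas, noalias calls, and byval/noalias arguments, which is why the separate Argument branch could be deleted; the wrapper name mayEscape is hypothetical.

    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/CaptureTracking.h"

    bool mayEscape(const llvm::Value *V) {
      if (!llvm::isIdentifiedFunctionLocal(V))
        return true; // not a function-local object, so assume the worst
      // StoreCaptures=true: storing the pointer anywhere counts as a capture.
      return llvm::PointerMayBeCaptured(V, /*ReturnCaptures=*/false,
                                        /*StoreCaptures=*/true);
    }
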
diff --git a/src/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp b/src/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp
index 157811c..8c8e2ee 100644
--- a/src/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CodeMetrics.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Function.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/InstructionCost.h"
 
 #define DEBUG_TYPE "code-metrics"
 
@@ -116,7 +117,14 @@
     const BasicBlock *BB, const TargetTransformInfo &TTI,
     const SmallPtrSetImpl<const Value *> &EphValues, bool PrepareForLTO) {
   ++NumBlocks;
-  unsigned NumInstsBeforeThisBB = NumInsts;
+  // Use a proxy variable of type InstructionCost for NumInsts, so that the
+  // accumulation can use InstructionCost's arithmetic properties, such as
+  // saturation, once that feature is added to InstructionCost.
+  // When storing the value back to NumInsts, we can assume all costs are
+  // valid, because the IR should not contain any nodes that cannot be
+  // costed; if it does, the cost model is broken.
+  InstructionCost NumInstsProxy = NumInsts;
+  InstructionCost NumInstsBeforeThisBB = NumInsts;
   for (const Instruction &I : *BB) {
     // Skip ephemeral values.
     if (EphValues.count(&I))
@@ -175,7 +183,8 @@
       if (InvI->cannotDuplicate())
         notDuplicatable = true;
 
-    NumInsts += TTI.getUserCost(&I, TargetTransformInfo::TCK_CodeSize);
+    NumInstsProxy += TTI.getUserCost(&I, TargetTransformInfo::TCK_CodeSize);
+    NumInsts = *NumInstsProxy.getValue();
   }
 
   if (isa<ReturnInst>(BB->getTerminator()))
@@ -195,5 +204,6 @@
   notDuplicatable |= isa<IndirectBrInst>(BB->getTerminator());
 
   // Remember NumInsts for this BB.
-  NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
+  InstructionCost NumInstsThisBB = NumInstsProxy - NumInstsBeforeThisBB;
+  NumBBInsts[BB] = *NumInstsThisBB.getValue();
 }
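
The proxy pattern above, reduced to a minimal sketch (the helper accumulateCost is hypothetical): arithmetic happens on InstructionCost, and only a value asserted to be valid is stored back into the plain counter.

    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/Support/InstructionCost.h"
    #include <cassert>

    unsigned accumulateCost(const llvm::TargetTransformInfo &TTI,
                            const llvm::Instruction &I, unsigned NumInsts) {
      llvm::InstructionCost Proxy = NumInsts;
      Proxy += TTI.getUserCost(&I, llvm::TargetTransformInfo::TCK_CodeSize);
      assert(Proxy.isValid() && "IR contained a node that cannot be costed");
      return static_cast<unsigned>(*Proxy.getValue());
    }
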
diff --git a/src/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp b/src/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp
index fea11cf..b28a0d6 100644
--- a/src/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ConstantFolding.cpp
@@ -41,6 +41,7 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/IntrinsicsARM.h"
 #include "llvm/IR/IntrinsicsWebAssembly.h"
@@ -62,6 +63,11 @@
 using namespace llvm;
 
 namespace {
+Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
+                                  ArrayRef<Constant *> Ops,
+                                  const DataLayout &DL,
+                                  const TargetLibraryInfo *TLI,
+                                  bool ForLoadOperand);
 
 //===----------------------------------------------------------------------===//
 // Constant Folding internal helper functions
@@ -389,7 +395,7 @@
 
     // If this isn't an aggregate type, there is nothing we can do to drill down
     // and find a bitcastable constant.
-    if (!SrcTy->isAggregateType())
+    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
       return nullptr;
 
     // We're simulating a load through a pointer that was bitcast to point to
@@ -658,16 +664,10 @@
 Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                              const DataLayout &DL) {
   auto *SrcPtr = CE->getOperand(0);
-  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
-  if (!SrcPtrTy)
-    return nullptr;
-  Type *SrcTy = SrcPtrTy->getPointerElementType();
-
-  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
-  if (!C)
+  if (!SrcPtr->getType()->isPointerTy())
     return nullptr;
 
-  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
+  return ConstantFoldLoadFromConstPtr(SrcPtr, DestTy, DL);
 }
 
 } // end anonymous namespace
@@ -677,7 +677,7 @@
   // First, try the easy cases:
   if (auto *GV = dyn_cast<GlobalVariable>(C))
     if (GV->isConstant() && GV->hasDefinitiveInitializer())
-      return GV->getInitializer();
+      return ConstantFoldLoadThroughBitcast(GV->getInitializer(), Ty, DL);
 
   if (auto *GA = dyn_cast<GlobalAlias>(C))
     if (GA->getAliasee() && !GA->isInterposable())
@@ -691,10 +691,37 @@
   if (CE->getOpcode() == Instruction::GetElementPtr) {
     if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
       if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
-        if (Constant *V =
-             ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
+        if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr(
+                GV->getInitializer(), CE, Ty, DL))
           return V;
       }
+    } else {
+      // Try to simplify the GEP if the pointer operand wasn't a GlobalVariable.
+      // SymbolicallyEvaluateGEP() with `ForLoadOperand = true` can potentially
+      // simplify the GEP further than it normally would, but it should only
+      // be used when constant folding loads.
+      SmallVector<Constant *> Ops;
+      for (unsigned I = 0, E = CE->getNumOperands(); I != E; ++I)
+        Ops.push_back(cast<Constant>(CE->getOperand(I)));
+      if (auto *Simplified = dyn_cast_or_null<ConstantExpr>(
+              SymbolicallyEvaluateGEP(cast<GEPOperator>(CE), Ops, DL, nullptr,
+                                      /*ForLoadOperand*/ true))) {
+        // If the symbolically evaluated GEP is another GEP, we can only const
+        // fold it if the resulting pointer operand is a GlobalValue. Otherwise
+        // there is nothing else to simplify since the GEP is already in the
+        // most simplified form.
+        if (isa<GEPOperator>(Simplified)) {
+          if (auto *GV = dyn_cast<GlobalVariable>(Simplified->getOperand(0))) {
+            if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
+              if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr(
+                      GV->getInitializer(), Simplified, Ty, DL))
+                return V;
+            }
+          }
+        } else {
+          return ConstantFoldLoadFromConstPtr(Simplified, Ty, DL);
+        }
+      }
     }
   }
 
@@ -753,15 +780,6 @@
 
 namespace {
 
-Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
-  if (LI->isVolatile()) return nullptr;
-
-  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
-    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);
-
-  return nullptr;
-}
-
 /// One of Op0/Op1 is a constant expression.
 /// Attempt to symbolically evaluate the result of a binary operator merging
 /// these together.  If target data info is available, it is provided as DL,
@@ -849,18 +867,24 @@
 }
 
 /// Strip the pointer casts, but preserve the address space information.
-Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
+Constant *StripPtrCastKeepAS(Constant *Ptr, bool ForLoadOperand) {
   assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
   auto *OldPtrTy = cast<PointerType>(Ptr->getType());
   Ptr = cast<Constant>(Ptr->stripPointerCasts());
-  auto *NewPtrTy = cast<PointerType>(Ptr->getType());
+  if (ForLoadOperand) {
+    while (isa<GlobalAlias>(Ptr) && !cast<GlobalAlias>(Ptr)->isInterposable() &&
+           !cast<GlobalAlias>(Ptr)->getBaseObject()->isInterposable()) {
+      Ptr = cast<GlobalAlias>(Ptr)->getAliasee();
+    }
+  }
 
-  ElemTy = NewPtrTy->getPointerElementType();
+  auto *NewPtrTy = cast<PointerType>(Ptr->getType());
 
   // Preserve the address space number of the pointer.
   if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
-    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
-    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
+    Ptr = ConstantExpr::getPointerCast(
+        Ptr, PointerType::getWithSamePointeeType(NewPtrTy,
+                                                 OldPtrTy->getAddressSpace()));
   }
   return Ptr;
 }
@@ -869,7 +893,8 @@
 Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                   ArrayRef<Constant *> Ops,
                                   const DataLayout &DL,
-                                  const TargetLibraryInfo *TLI) {
+                                  const TargetLibraryInfo *TLI,
+                                  bool ForLoadOperand) {
   const GEPOperator *InnermostGEP = GEP;
   bool InBounds = GEP->isInBounds();
 
@@ -889,27 +914,24 @@
 
   Type *IntIdxTy = DL.getIndexType(Ptr->getType());
 
-  // If this is a constant expr gep that is effectively computing an
-  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
-  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
-      if (!isa<ConstantInt>(Ops[i])) {
+  // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
+  // "inttoptr (sub (ptrtoint Ptr), V)"
+  if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
+    auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
+    assert((!CE || CE->getType() == IntIdxTy) &&
+           "CastGEPIndices didn't canonicalize index types!");
+    if (CE && CE->getOpcode() == Instruction::Sub &&
+        CE->getOperand(0)->isNullValue()) {
+      Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
+      Res = ConstantExpr::getSub(Res, CE->getOperand(1));
+      Res = ConstantExpr::getIntToPtr(Res, ResTy);
+      return ConstantFoldConstant(Res, DL, TLI);
+    }
+  }
 
-        // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
-        // "inttoptr (sub (ptrtoint Ptr), V)"
-        if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
-          auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
-          assert((!CE || CE->getType() == IntIdxTy) &&
-                 "CastGEPIndices didn't canonicalize index types!");
-          if (CE && CE->getOpcode() == Instruction::Sub &&
-              CE->getOperand(0)->isNullValue()) {
-            Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
-            Res = ConstantExpr::getSub(Res, CE->getOperand(1));
-            Res = ConstantExpr::getIntToPtr(Res, ResTy);
-            return ConstantFoldConstant(Res, DL, TLI);
-          }
-        }
-        return nullptr;
-      }
+  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
+    if (!isa<ConstantInt>(Ops[i]))
+      return nullptr;
 
   unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
   APInt Offset =
@@ -917,7 +939,7 @@
             DL.getIndexedOffsetInType(
                 SrcElemTy,
                 makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
-  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
+  Ptr = StripPtrCastKeepAS(Ptr, ForLoadOperand);
 
   // If this is a GEP of a GEP, fold it all into a single GEP.
   while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
@@ -939,7 +961,7 @@
     Ptr = cast<Constant>(GEP->getOperand(0));
     SrcElemTy = GEP->getSourceElementType();
     Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
-    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
+    Ptr = StripPtrCastKeepAS(Ptr, ForLoadOperand);
   }
 
   // If the base value for this address is a literal integer value, fold the
@@ -963,8 +985,9 @@
   // we eliminate over-indexing of the notional static type array bounds.
   // This makes it easy to determine if the getelementptr is "inbounds".
   // Also, this helps GlobalOpt do SROA on GlobalVariables.
-  Type *Ty = PTy;
   SmallVector<Constant *, 32> NewIdxs;
+  Type *Ty = PTy;
+  SrcElemTy = PTy->getElementType();
 
   do {
     if (!Ty->isStructTy()) {
@@ -1049,7 +1072,7 @@
 
   // If we ended up indexing a member with a type that doesn't match
   // the type of what the original indices indexed, add a cast.
-  if (Ty != ResElemTy)
+  if (C->getType() != ResTy)
     C = FoldBitCast(C, ResTy, DL);
 
   return C;
@@ -1076,7 +1099,8 @@
     return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);
 
   if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
-    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
+    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI,
+                                              /*ForLoadOperand*/ false))
       return C;
 
     return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
@@ -1211,21 +1235,17 @@
     return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                            DL, TLI);
 
-  if (const auto *LI = dyn_cast<LoadInst>(I))
-    return ConstantFoldLoadInst(LI, DL);
-
-  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
-    return ConstantExpr::getInsertValue(
-                                cast<Constant>(IVI->getAggregateOperand()),
-                                cast<Constant>(IVI->getInsertedValueOperand()),
-                                IVI->getIndices());
+  if (const auto *LI = dyn_cast<LoadInst>(I)) {
+    if (LI->isVolatile())
+      return nullptr;
+    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
   }
 
-  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
-    return ConstantExpr::getExtractValue(
-                                    cast<Constant>(EVI->getAggregateOperand()),
-                                    EVI->getIndices());
-  }
+  if (auto *IVI = dyn_cast<InsertValueInst>(I))
+    return ConstantExpr::getInsertValue(Ops[0], Ops[1], IVI->getIndices());
+
+  if (auto *EVI = dyn_cast<ExtractValueInst>(I))
+    return ConstantExpr::getExtractValue(Ops[0], EVI->getIndices());
 
   return ConstantFoldInstOperands(I, Ops, DL, TLI);
 }
@@ -1410,7 +1430,9 @@
 }
 
 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
-                                                       ConstantExpr *CE) {
+                                                       ConstantExpr *CE,
+                                                       Type *Ty,
+                                                       const DataLayout &DL) {
   if (!CE->getOperand(1)->isNullValue())
     return nullptr;  // Do not allow stepping over the value!
 
@@ -1421,7 +1443,7 @@
     if (!C)
       return nullptr;
   }
-  return C;
+  return ConstantFoldLoadThroughBitcast(C, Ty, DL);
 }
 
 Constant *
@@ -1486,10 +1508,12 @@
   case Intrinsic::vector_reduce_umin:
   case Intrinsic::vector_reduce_umax:
   // Target intrinsics
+  case Intrinsic::amdgcn_perm:
   case Intrinsic::arm_mve_vctp8:
   case Intrinsic::arm_mve_vctp16:
   case Intrinsic::arm_mve_vctp32:
   case Intrinsic::arm_mve_vctp64:
+  case Intrinsic::aarch64_sve_convert_from_svbool:
   // WebAssembly float semantics are always known
   case Intrinsic::wasm_trunc_signed:
   case Intrinsic::wasm_trunc_unsigned:
@@ -1569,6 +1593,13 @@
   case Intrinsic::rint:
   // Constrained intrinsics can be folded if FP environment is known
   // to compiler.
+  case Intrinsic::experimental_constrained_fma:
+  case Intrinsic::experimental_constrained_fmuladd:
+  case Intrinsic::experimental_constrained_fadd:
+  case Intrinsic::experimental_constrained_fsub:
+  case Intrinsic::experimental_constrained_fmul:
+  case Intrinsic::experimental_constrained_fdiv:
+  case Intrinsic::experimental_constrained_frem:
   case Intrinsic::experimental_constrained_ceil:
   case Intrinsic::experimental_constrained_floor:
   case Intrinsic::experimental_constrained_round:
@@ -1694,42 +1725,57 @@
   return false;
 }
 
-Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
+Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
+                         Type *Ty) {
   llvm_fenv_clearexcept();
-  V = NativeFP(V);
+  double Result = NativeFP(V.convertToDouble());
   if (llvm_fenv_testexcept()) {
     llvm_fenv_clearexcept();
     return nullptr;
   }
 
-  return GetConstantFoldFPValue(V, Ty);
+  return GetConstantFoldFPValue(Result, Ty);
 }
 
-Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
-                               double W, Type *Ty) {
+Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
+                               const APFloat &V, const APFloat &W, Type *Ty) {
   llvm_fenv_clearexcept();
-  V = NativeFP(V, W);
+  double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
   if (llvm_fenv_testexcept()) {
     llvm_fenv_clearexcept();
     return nullptr;
   }
 
-  return GetConstantFoldFPValue(V, Ty);
+  return GetConstantFoldFPValue(Result, Ty);
 }
 
-Constant *ConstantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
+Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
   FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
   if (!VT)
     return nullptr;
-  ConstantInt *CI = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
-  if (!CI)
-    return nullptr;
-  APInt Acc = CI->getValue();
 
-  for (unsigned I = 1; I < VT->getNumElements(); I++) {
-    if (!(CI = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
+  // This isn't strictly necessary, but handle the special/common case of zero:
+  // all integer reductions of a zero input produce zero.
+  if (isa<ConstantAggregateZero>(Op))
+    return ConstantInt::get(VT->getElementType(), 0);
+
+  // This is the same as the underlying binops - poison propagates.
+  if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
+    return PoisonValue::get(VT->getElementType());
+
+  // TODO: Handle undef.
+  if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
+    return nullptr;
+
+  auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
+  if (!EltC)
+    return nullptr;
+
+  APInt Acc = EltC->getValue();
+  for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
+    if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
       return nullptr;
-    const APInt &X = CI->getValue();
+    const APInt &X = EltC->getValue();
     switch (IID) {
     case Intrinsic::vector_reduce_add:
       Acc = Acc + X;
@@ -1794,10 +1840,7 @@
 double getValueAsDouble(ConstantFP *Op) {
   Type *Ty = Op->getType();
 
-  if (Ty->isFloatTy())
-    return Op->getValueAPF().convertToFloat();
-
-  if (Ty->isDoubleTy())
+  if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
     return Op->getValueAPF().convertToDouble();
 
   bool unused;
@@ -1818,6 +1861,56 @@
   return false;
 }
 
+/// Checks if the given intrinsic call, which evaluates to constant, is allowed
+/// to be folded.
+///
+/// \param CI Constrained intrinsic call.
+/// \param St Exception flags raised during constant evaluation.
+static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
+                               APFloat::opStatus St) {
+  Optional<RoundingMode> ORM = CI->getRoundingMode();
+  Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
+
+  // If the operation does not change exception status flags, it is safe
+  // to fold.
+  if (St == APFloat::opStatus::opOK) {
+    // When FP exceptions are not ignored, the intrinsic call will not be
+    // eliminated, because it is considered to have a side effect. But we
+    // know that its evaluation does not raise exceptions, so the side
+    // effect is absent. To allow removing the call, mark it as not
+    // accessing memory.
+    if (EB && *EB != fp::ExceptionBehavior::ebIgnore)
+      CI->addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
+    return true;
+  }
+
+  // If evaluation raised FP exception, the result can depend on rounding
+  // mode. If the latter is unknown, folding is not possible.
+  if (!ORM || *ORM == RoundingMode::Dynamic)
+    return false;
+
+  // If FP exceptions are ignored, fold the call, even if such exception is
+  // raised.
+  if (!EB || *EB != fp::ExceptionBehavior::ebStrict)
+    return true;
+
+  // Leave the calculation to run time so that the exception flags are set
+  // correctly in hardware.
+  return false;
+}
+
+/// Returns the rounding mode that should be used for constant evaluation.
+static RoundingMode
+getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
+  Optional<RoundingMode> ORM = CI->getRoundingMode();
+  if (!ORM || *ORM == RoundingMode::Dynamic)
+    // Even if the rounding mode is unknown, try evaluating the operation.
+    // If it does not raise an inexact exception, rounding was not applied,
+    // so the result is exact and does not depend on the rounding mode.
+    // Whether other FP exceptions are raised does not depend on the
+    // rounding mode either.
+    return RoundingMode::NearestTiesToEven;
+  return *ORM;
+}
+
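
To make the opStatus reasoning in mayFoldConstrained concrete, here is a self-contained sketch with hypothetical values (the function name exactAddIsFoldable is an illustration, not LLVM API): an exact operation reports opOK, so folding it cannot change the observable FP-exception state.

    #include "llvm/ADT/APFloat.h"

    bool exactAddIsFoldable() {
      llvm::APFloat A(1.0f), B(2.0f);
      llvm::APFloat::opStatus St =
          A.add(B, llvm::APFloat::rmNearestTiesToEven);
      return St == llvm::APFloat::opOK; // true: 1.0f + 2.0f is exact
    }
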
 static Constant *ConstantFoldScalarCall1(StringRef Name,
                                          Intrinsic::ID IntrinsicID,
                                          Type *Ty,
@@ -2019,31 +2112,32 @@
     /// the host native double versions.  Float versions are not called
     /// directly but for all these it is true (float)(f((double)arg)) ==
     /// f(arg).  Long double not supported yet.
-    double V = getValueAsDouble(Op);
+    APFloat APF = Op->getValueAPF();
 
     switch (IntrinsicID) {
       default: break;
       case Intrinsic::log:
-        return ConstantFoldFP(log, V, Ty);
+        return ConstantFoldFP(log, APF, Ty);
       case Intrinsic::log2:
         // TODO: What about hosts that lack a C99 library?
-        return ConstantFoldFP(Log2, V, Ty);
+        return ConstantFoldFP(Log2, APF, Ty);
       case Intrinsic::log10:
         // TODO: What about hosts that lack a C99 library?
-        return ConstantFoldFP(log10, V, Ty);
+        return ConstantFoldFP(log10, APF, Ty);
       case Intrinsic::exp:
-        return ConstantFoldFP(exp, V, Ty);
+        return ConstantFoldFP(exp, APF, Ty);
       case Intrinsic::exp2:
         // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
-        return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
+        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
       case Intrinsic::sin:
-        return ConstantFoldFP(sin, V, Ty);
+        return ConstantFoldFP(sin, APF, Ty);
       case Intrinsic::cos:
-        return ConstantFoldFP(cos, V, Ty);
+        return ConstantFoldFP(cos, APF, Ty);
       case Intrinsic::sqrt:
-        return ConstantFoldFP(sqrt, V, Ty);
+        return ConstantFoldFP(sqrt, APF, Ty);
       case Intrinsic::amdgcn_cos:
-      case Intrinsic::amdgcn_sin:
+      case Intrinsic::amdgcn_sin: {
+        double V = getValueAsDouble(Op);
         if (V < -256.0 || V > 256.0)
           // The gfx8 and gfx9 architectures handle arguments outside the range
           // [-256, 256] differently. This should be a rare case so bail out
@@ -2062,6 +2156,7 @@
             V = sin(V * 2.0 * numbers::pi);
         }
         return GetConstantFoldFPValue(V, Ty);
+      }
     }
 
     if (!TLI)
@@ -2077,19 +2172,19 @@
     case LibFunc_acos_finite:
     case LibFunc_acosf_finite:
       if (TLI->has(Func))
-        return ConstantFoldFP(acos, V, Ty);
+        return ConstantFoldFP(acos, APF, Ty);
       break;
     case LibFunc_asin:
     case LibFunc_asinf:
     case LibFunc_asin_finite:
     case LibFunc_asinf_finite:
       if (TLI->has(Func))
-        return ConstantFoldFP(asin, V, Ty);
+        return ConstantFoldFP(asin, APF, Ty);
       break;
     case LibFunc_atan:
     case LibFunc_atanf:
       if (TLI->has(Func))
-        return ConstantFoldFP(atan, V, Ty);
+        return ConstantFoldFP(atan, APF, Ty);
       break;
     case LibFunc_ceil:
     case LibFunc_ceilf:
@@ -2101,21 +2196,21 @@
     case LibFunc_cos:
     case LibFunc_cosf:
       if (TLI->has(Func))
-        return ConstantFoldFP(cos, V, Ty);
+        return ConstantFoldFP(cos, APF, Ty);
       break;
     case LibFunc_cosh:
     case LibFunc_coshf:
     case LibFunc_cosh_finite:
     case LibFunc_coshf_finite:
       if (TLI->has(Func))
-        return ConstantFoldFP(cosh, V, Ty);
+        return ConstantFoldFP(cosh, APF, Ty);
       break;
     case LibFunc_exp:
     case LibFunc_expf:
     case LibFunc_exp_finite:
     case LibFunc_expf_finite:
       if (TLI->has(Func))
-        return ConstantFoldFP(exp, V, Ty);
+        return ConstantFoldFP(exp, APF, Ty);
       break;
     case LibFunc_exp2:
     case LibFunc_exp2f:
@@ -2123,7 +2218,7 @@
     case LibFunc_exp2f_finite:
       if (TLI->has(Func))
         // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
-        return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
+        return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
       break;
     case LibFunc_fabs:
     case LibFunc_fabsf:
@@ -2143,24 +2238,24 @@
     case LibFunc_logf:
     case LibFunc_log_finite:
     case LibFunc_logf_finite:
-      if (V > 0.0 && TLI->has(Func))
-        return ConstantFoldFP(log, V, Ty);
+      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
+        return ConstantFoldFP(log, APF, Ty);
       break;
     case LibFunc_log2:
     case LibFunc_log2f:
     case LibFunc_log2_finite:
     case LibFunc_log2f_finite:
-      if (V > 0.0 && TLI->has(Func))
+      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
         // TODO: What about hosts that lack a C99 library?
-        return ConstantFoldFP(Log2, V, Ty);
+        return ConstantFoldFP(Log2, APF, Ty);
       break;
     case LibFunc_log10:
     case LibFunc_log10f:
     case LibFunc_log10_finite:
     case LibFunc_log10f_finite:
-      if (V > 0.0 && TLI->has(Func))
+      if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
         // TODO: What about hosts that lack a C99 library?
-        return ConstantFoldFP(log10, V, Ty);
+        return ConstantFoldFP(log10, APF, Ty);
       break;
     case LibFunc_nearbyint:
     case LibFunc_nearbyintf:
@@ -2181,29 +2276,29 @@
     case LibFunc_sin:
     case LibFunc_sinf:
       if (TLI->has(Func))
-        return ConstantFoldFP(sin, V, Ty);
+        return ConstantFoldFP(sin, APF, Ty);
       break;
     case LibFunc_sinh:
     case LibFunc_sinhf:
     case LibFunc_sinh_finite:
     case LibFunc_sinhf_finite:
       if (TLI->has(Func))
-        return ConstantFoldFP(sinh, V, Ty);
+        return ConstantFoldFP(sinh, APF, Ty);
       break;
     case LibFunc_sqrt:
     case LibFunc_sqrtf:
-      if (V >= 0.0 && TLI->has(Func))
-        return ConstantFoldFP(sqrt, V, Ty);
+      if (!APF.isNegative() && TLI->has(Func))
+        return ConstantFoldFP(sqrt, APF, Ty);
       break;
     case LibFunc_tan:
     case LibFunc_tanf:
       if (TLI->has(Func))
-        return ConstantFoldFP(tan, V, Ty);
+        return ConstantFoldFP(tan, APF, Ty);
       break;
     case LibFunc_tanh:
     case LibFunc_tanhf:
       if (TLI->has(Func))
-        return ConstantFoldFP(tanh, V, Ty);
+        return ConstantFoldFP(tanh, APF, Ty);
       break;
     case LibFunc_trunc:
     case LibFunc_truncf:
@@ -2243,20 +2338,20 @@
     }
   }
 
-  if (isa<ConstantAggregateZero>(Operands[0])) {
-    switch (IntrinsicID) {
-    default: break;
-    case Intrinsic::vector_reduce_add:
-    case Intrinsic::vector_reduce_mul:
-    case Intrinsic::vector_reduce_and:
-    case Intrinsic::vector_reduce_or:
-    case Intrinsic::vector_reduce_xor:
-    case Intrinsic::vector_reduce_smin:
-    case Intrinsic::vector_reduce_smax:
-    case Intrinsic::vector_reduce_umin:
-    case Intrinsic::vector_reduce_umax:
-      return ConstantInt::get(Ty, 0);
-    }
+  switch (IntrinsicID) {
+  default: break;
+  case Intrinsic::vector_reduce_add:
+  case Intrinsic::vector_reduce_mul:
+  case Intrinsic::vector_reduce_and:
+  case Intrinsic::vector_reduce_or:
+  case Intrinsic::vector_reduce_xor:
+  case Intrinsic::vector_reduce_smin:
+  case Intrinsic::vector_reduce_smax:
+  case Intrinsic::vector_reduce_umin:
+  case Intrinsic::vector_reduce_umax:
+    if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
+      return C;
+    break;
   }
 
   // Support ConstantVector in case we have an Undef in the top.
@@ -2265,18 +2360,6 @@
     auto *Op = cast<Constant>(Operands[0]);
     switch (IntrinsicID) {
     default: break;
-    case Intrinsic::vector_reduce_add:
-    case Intrinsic::vector_reduce_mul:
-    case Intrinsic::vector_reduce_and:
-    case Intrinsic::vector_reduce_or:
-    case Intrinsic::vector_reduce_xor:
-    case Intrinsic::vector_reduce_smin:
-    case Intrinsic::vector_reduce_smax:
-    case Intrinsic::vector_reduce_umin:
-    case Intrinsic::vector_reduce_umax:
-      if (Constant *C = ConstantFoldVectorReduce(IntrinsicID, Op))
-        return C;
-      break;
     case Intrinsic::x86_sse_cvtss2si:
     case Intrinsic::x86_sse_cvtss2si64:
     case Intrinsic::x86_sse2_cvtsd2si:
@@ -2330,58 +2413,74 @@
     }
   }
 
-  if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
-    if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
+  if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
+    if (!Ty->isFloatingPointTy())
       return nullptr;
-    double Op1V = getValueAsDouble(Op1);
+    APFloat Op1V = Op1->getValueAPF();
 
-    if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
+    if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
       if (Op2->getType() != Op1->getType())
         return nullptr;
+      APFloat Op2V = Op2->getValueAPF();
 
-      double Op2V = getValueAsDouble(Op2);
-      if (IntrinsicID == Intrinsic::pow) {
+      if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
+        RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
+        APFloat Res = Op1V;
+        APFloat::opStatus St;
+        switch (IntrinsicID) {
+        default:
+          return nullptr;
+        case Intrinsic::experimental_constrained_fadd:
+          St = Res.add(Op2V, RM);
+          break;
+        case Intrinsic::experimental_constrained_fsub:
+          St = Res.subtract(Op2V, RM);
+          break;
+        case Intrinsic::experimental_constrained_fmul:
+          St = Res.multiply(Op2V, RM);
+          break;
+        case Intrinsic::experimental_constrained_fdiv:
+          St = Res.divide(Op2V, RM);
+          break;
+        case Intrinsic::experimental_constrained_frem:
+          St = Res.mod(Op2V);
+          break;
+        }
+        if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
+                               St))
+          return ConstantFP::get(Ty->getContext(), Res);
+        return nullptr;
+      }
+
+      switch (IntrinsicID) {
+      default:
+        break;
+      case Intrinsic::copysign:
+        return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
+      case Intrinsic::minnum:
+        return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
+      case Intrinsic::maxnum:
+        return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
+      case Intrinsic::minimum:
+        return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
+      case Intrinsic::maximum:
+        return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
+      }
+
+      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
+        return nullptr;
+
+      switch (IntrinsicID) {
+      default:
+        break;
+      case Intrinsic::pow:
         return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
-      }
-      if (IntrinsicID == Intrinsic::copysign) {
-        APFloat V1 = Op1->getValueAPF();
-        const APFloat &V2 = Op2->getValueAPF();
-        V1.copySign(V2);
-        return ConstantFP::get(Ty->getContext(), V1);
-      }
-
-      if (IntrinsicID == Intrinsic::minnum) {
-        const APFloat &C1 = Op1->getValueAPF();
-        const APFloat &C2 = Op2->getValueAPF();
-        return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
-      }
-
-      if (IntrinsicID == Intrinsic::maxnum) {
-        const APFloat &C1 = Op1->getValueAPF();
-        const APFloat &C2 = Op2->getValueAPF();
-        return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
-      }
-
-      if (IntrinsicID == Intrinsic::minimum) {
-        const APFloat &C1 = Op1->getValueAPF();
-        const APFloat &C2 = Op2->getValueAPF();
-        return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
-      }
-
-      if (IntrinsicID == Intrinsic::maximum) {
-        const APFloat &C1 = Op1->getValueAPF();
-        const APFloat &C2 = Op2->getValueAPF();
-        return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
-      }
-
-      if (IntrinsicID == Intrinsic::amdgcn_fmul_legacy) {
-        const APFloat &C1 = Op1->getValueAPF();
-        const APFloat &C2 = Op2->getValueAPF();
+      case Intrinsic::amdgcn_fmul_legacy:
         // The legacy behaviour is that multiplying +/- 0.0 by anything, even
         // NaN or infinity, gives +0.0.
-        if (C1.isZero() || C2.isZero())
+        if (Op1V.isZero() || Op2V.isZero())
           return ConstantFP::getNullValue(Ty);
-        return ConstantFP::get(Ty->getContext(), C1 * C2);
+        return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
       }
 
       if (!TLI)
@@ -2424,18 +2523,23 @@
         break;
       }
     } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
+      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
+        return nullptr;
       if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
-        return ConstantFP::get(Ty->getContext(),
-                               APFloat((float)std::pow((float)Op1V,
-                                               (int)Op2C->getZExtValue())));
+        return ConstantFP::get(
+            Ty->getContext(),
+            APFloat((float)std::pow((float)Op1V.convertToDouble(),
+                                    (int)Op2C->getZExtValue())));
       if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
-        return ConstantFP::get(Ty->getContext(),
-                               APFloat((float)std::pow((float)Op1V,
-                                               (int)Op2C->getZExtValue())));
+        return ConstantFP::get(
+            Ty->getContext(),
+            APFloat((float)std::pow((float)Op1V.convertToDouble(),
+                                    (int)Op2C->getZExtValue())));
       if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
-        return ConstantFP::get(Ty->getContext(),
-                               APFloat((double)std::pow((double)Op1V,
-                                                 (int)Op2C->getZExtValue())));
+        return ConstantFP::get(
+            Ty->getContext(),
+            APFloat((double)std::pow(Op1V.convertToDouble(),
+                                     (int)Op2C->getZExtValue())));
 
       if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
         // FIXME: Should flush denorms depending on FP mode, but that's ignored
@@ -2490,16 +2594,19 @@
 
     case Intrinsic::usub_with_overflow:
     case Intrinsic::ssub_with_overflow:
+      // X - undef -> { 0, false }
+      // undef - X -> { 0, false }
+      if (!C0 || !C1)
+        return Constant::getNullValue(Ty);
+      LLVM_FALLTHROUGH;
     case Intrinsic::uadd_with_overflow:
     case Intrinsic::sadd_with_overflow:
-      // X - undef -> { undef, false }
-      // undef - X -> { undef, false }
-      // X + undef -> { undef, false }
-      // undef + x -> { undef, false }
+      // X + undef -> { -1, false }
+      // undef + x -> { -1, false }
       if (!C0 || !C1) {
         return ConstantStruct::get(
             cast<StructType>(Ty),
-            {UndefValue::get(Ty->getStructElementType(0)),
+            {Constant::getAllOnesValue(Ty->getStructElementType(0)),
              Constant::getNullValue(Ty->getStructElementType(1))});
       }
       LLVM_FALLTHROUGH;
@@ -2699,6 +2806,46 @@
   }
 }
 
+static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
+                                                 Type *Ty) {
+  const APInt *C0, *C1, *C2;
+  if (!getConstIntOrUndef(Operands[0], C0) ||
+      !getConstIntOrUndef(Operands[1], C1) ||
+      !getConstIntOrUndef(Operands[2], C2))
+    return nullptr;
+
+  if (!C2)
+    return UndefValue::get(Ty);
+
+  APInt Val(32, 0);
+  unsigned NumUndefBytes = 0;
+  for (unsigned I = 0; I < 32; I += 8) {
+    unsigned Sel = C2->extractBitsAsZExtValue(8, I);
+    unsigned B = 0;
+
+    if (Sel >= 13)
+      B = 0xff;
+    else if (Sel == 12)
+      B = 0x00;
+    else {
+      const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
+      if (!Src)
+        ++NumUndefBytes;
+      else if (Sel < 8)
+        B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
+      else
+        B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
+    }
+
+    Val.insertBits(B, I, 8);
+  }
+
+  if (NumUndefBytes == 4)
+    return UndefValue::get(Ty);
+
+  return ConstantInt::get(Ty, Val);
+}
+
 static Constant *ConstantFoldScalarCall3(StringRef Name,
                                          Intrinsic::ID IntrinsicID,
                                          Type *Ty,
@@ -2710,15 +2857,34 @@
   if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
     if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
       if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
+        const APFloat &C1 = Op1->getValueAPF();
+        const APFloat &C2 = Op2->getValueAPF();
+        const APFloat &C3 = Op3->getValueAPF();
+
+        if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
+          RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
+          APFloat Res = C1;
+          APFloat::opStatus St;
+          switch (IntrinsicID) {
+          default:
+            return nullptr;
+          case Intrinsic::experimental_constrained_fma:
+          case Intrinsic::experimental_constrained_fmuladd:
+            St = Res.fusedMultiplyAdd(C2, C3, RM);
+            break;
+          }
+          if (mayFoldConstrained(
+                  const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
+            return ConstantFP::get(Ty->getContext(), Res);
+          return nullptr;
+        }
+
         switch (IntrinsicID) {
         default: break;
         case Intrinsic::amdgcn_fma_legacy: {
-          const APFloat &C1 = Op1->getValueAPF();
-          const APFloat &C2 = Op2->getValueAPF();
           // The legacy behaviour is that multiplying +/- 0.0 by anything, even
           // NaN or infinity, gives +0.0.
           if (C1.isZero() || C2.isZero()) {
-            const APFloat &C3 = Op3->getValueAPF();
             // It's tempting to just return C3 here, but that would give the
             // wrong result if C3 was -0.0.
             return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
@@ -2727,18 +2893,15 @@
         }
         case Intrinsic::fma:
         case Intrinsic::fmuladd: {
-          APFloat V = Op1->getValueAPF();
-          V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
-                             APFloat::rmNearestTiesToEven);
+          APFloat V = C1;
+          V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
           return ConstantFP::get(Ty->getContext(), V);
         }
         case Intrinsic::amdgcn_cubeid:
         case Intrinsic::amdgcn_cubema:
         case Intrinsic::amdgcn_cubesc:
         case Intrinsic::amdgcn_cubetc: {
-          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(
-              IntrinsicID, Op1->getValueAPF(), Op2->getValueAPF(),
-              Op3->getValueAPF());
+          APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
           return ConstantFP::get(Ty->getContext(), V);
         }
         }
@@ -2746,41 +2909,42 @@
     }
   }
 
-  if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
-    if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
-      if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
-        switch (IntrinsicID) {
-        default: break;
-        case Intrinsic::smul_fix:
-        case Intrinsic::smul_fix_sat: {
-          // This code performs rounding towards negative infinity in case the
-          // result cannot be represented exactly for the given scale. Targets
-          // that do care about rounding should use a target hook for specifying
-          // how rounding should be done, and provide their own folding to be
-          // consistent with rounding. This is the same approach as used by
-          // DAGTypeLegalizer::ExpandIntRes_MULFIX.
-          const APInt &Lhs = Op1->getValue();
-          const APInt &Rhs = Op2->getValue();
-          unsigned Scale = Op3->getValue().getZExtValue();
-          unsigned Width = Lhs.getBitWidth();
-          assert(Scale < Width && "Illegal scale.");
-          unsigned ExtendedWidth = Width * 2;
-          APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
-                           Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
-          if (IntrinsicID == Intrinsic::smul_fix_sat) {
-            APInt MaxValue =
-              APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
-            APInt MinValue =
-              APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
-            Product = APIntOps::smin(Product, MaxValue);
-            Product = APIntOps::smax(Product, MinValue);
-          }
-          return ConstantInt::get(Ty->getContext(),
-                                  Product.sextOrTrunc(Width));
-        }
-        }
-      }
+  if (IntrinsicID == Intrinsic::smul_fix ||
+      IntrinsicID == Intrinsic::smul_fix_sat) {
+    // poison * C -> poison
+    // C * poison -> poison
+    if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
+      return PoisonValue::get(Ty);
+
+    const APInt *C0, *C1;
+    if (!getConstIntOrUndef(Operands[0], C0) ||
+        !getConstIntOrUndef(Operands[1], C1))
+      return nullptr;
+
+    // undef * C -> 0
+    // C * undef -> 0
+    if (!C0 || !C1)
+      return Constant::getNullValue(Ty);
+
+    // This code performs rounding towards negative infinity in case the result
+    // cannot be represented exactly for the given scale. Targets that do care
+    // about rounding should use a target hook for specifying how rounding
+    // should be done, and provide their own folding to be consistent with
+    // rounding. This is the same approach as used by
+    // DAGTypeLegalizer::ExpandIntRes_MULFIX.
+    unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
+    unsigned Width = C0->getBitWidth();
+    assert(Scale < Width && "Illegal scale.");
+    unsigned ExtendedWidth = Width * 2;
+    APInt Product = (C0->sextOrSelf(ExtendedWidth) *
+                     C1->sextOrSelf(ExtendedWidth)).ashr(Scale);
+    if (IntrinsicID == Intrinsic::smul_fix_sat) {
+      APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
+      APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
+      Product = APIntOps::smin(Product, Max);
+      Product = APIntOps::smax(Product, Min);
     }
+    return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
   }
 
   if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
@@ -2813,6 +2977,9 @@
     return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
   }
 
+  if (IntrinsicID == Intrinsic::amdgcn_perm)
+    return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
+
   return nullptr;
 }
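
A worked instance of the funnel-shift fold in the context above, with hypothetical operands: fshl(a, b, s) takes the top BitWidth bits of the concatenation a:b shifted left by s, which the code computes as (a << s) | (b >> (BitWidth - s)).

    #include "llvm/ADT/APInt.h"

    llvm::APInt fshlExample() {
      llvm::APInt A(8, 0xAB), B(8, 0xCD);
      unsigned Sh = 4;
      // 0xABCD << 4 == 0xBCD0, whose top byte is 0xBC.
      return A.shl(Sh) | B.lshr(8 - Sh); // 0xB0 | 0x0C == 0xBC
    }
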
 
@@ -2834,20 +3001,10 @@
   return nullptr;
 }
 
-static Constant *ConstantFoldVectorCall(StringRef Name,
-                                        Intrinsic::ID IntrinsicID,
-                                        VectorType *VTy,
-                                        ArrayRef<Constant *> Operands,
-                                        const DataLayout &DL,
-                                        const TargetLibraryInfo *TLI,
-                                        const CallBase *Call) {
-  // Do not iterate on scalable vector. The number of elements is unknown at
-  // compile-time.
-  if (isa<ScalableVectorType>(VTy))
-    return nullptr;
-
-  auto *FVTy = cast<FixedVectorType>(VTy);
-
+static Constant *ConstantFoldFixedVectorCall(
+    StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
+    ArrayRef<Constant *> Operands, const DataLayout &DL,
+    const TargetLibraryInfo *TLI, const CallBase *Call) {
   SmallVector<Constant *, 4> Result(FVTy->getNumElements());
   SmallVector<Constant *, 4> Lane(Operands.size());
   Type *Ty = FVTy->getElementType();
@@ -2964,6 +3121,24 @@
   return ConstantVector::get(Result);
 }
 
+static Constant *ConstantFoldScalableVectorCall(
+    StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
+    ArrayRef<Constant *> Operands, const DataLayout &DL,
+    const TargetLibraryInfo *TLI, const CallBase *Call) {
+  switch (IntrinsicID) {
+  case Intrinsic::aarch64_sve_convert_from_svbool: {
+    auto *Src = dyn_cast<Constant>(Operands[0]);
+    if (!Src || !Src->isNullValue())
+      break;
+
+    return ConstantInt::getFalse(SVTy);
+  }
+  default:
+    break;
+  }
+  return nullptr;
+}
+
 } // end anonymous namespace
 
 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
@@ -2973,14 +3148,31 @@
     return nullptr;
   if (!F->hasName())
     return nullptr;
+
+  // If this is not an intrinsic and not recognized as a library call, bail out.
+  if (F->getIntrinsicID() == Intrinsic::not_intrinsic) {
+    if (!TLI)
+      return nullptr;
+    LibFunc LibF;
+    if (!TLI->getLibFunc(*F, LibF))
+      return nullptr;
+  }
+
   StringRef Name = F->getName();
-
   Type *Ty = F->getReturnType();
+  if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
+    return ConstantFoldFixedVectorCall(
+        Name, F->getIntrinsicID(), FVTy, Operands,
+        F->getParent()->getDataLayout(), TLI, Call);
 
-  if (auto *VTy = dyn_cast<VectorType>(Ty))
-    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
-                                  F->getParent()->getDataLayout(), TLI, Call);
+  if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
+    return ConstantFoldScalableVectorCall(
+        Name, F->getIntrinsicID(), SVTy, Operands,
+        F->getParent()->getDataLayout(), TLI, Call);
 
+  // TODO: If this is a library function, we already discovered that above,
+  //       so we should pass the LibFunc, not the name (and it might be better
+  //       still to separate intrinsic handling from libcalls).
   return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
                                 Call);
 }
@@ -3048,10 +3240,8 @@
         // FIXME: Stop using the host math library.
         // FIXME: The computation isn't done in the right precision.
         Type *Ty = OpC->getType();
-        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
-          double OpV = getValueAsDouble(OpC);
-          return ConstantFoldFP(tan, OpV, Ty) != nullptr;
-        }
+        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
+          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
         break;
       }
 
@@ -3105,11 +3295,8 @@
         // FIXME: The computation isn't done in the right precision.
         Type *Ty = Op0C->getType();
         if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
-          if (Ty == Op1C->getType()) {
-            double Op0V = getValueAsDouble(Op0C);
-            double Op1V = getValueAsDouble(Op1C);
-            return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
-          }
+          if (Ty == Op1C->getType())
+            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
         }
         break;
       }
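
A worked instance of the smul_fix fold above, with hypothetical Q4.4 operands: the product is formed at twice the bit width, then arithmetically shifted right by the scale (which rounds toward negative infinity) and truncated back.

    #include "llvm/ADT/APInt.h"

    llvm::APInt smulFixExample() {
      llvm::APInt C0(8, 24), C1(8, 32); // 1.5 and 2.0 in Q4.4
      unsigned Scale = 4, Width = C0.getBitWidth();
      llvm::APInt Product =
          (C0.sext(2 * Width) * C1.sext(2 * Width)).ashr(Scale);
      return Product.sextOrTrunc(Width); // 48, i.e. 3.0 in Q4.4
    }
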
diff --git a/src/llvm-project/llvm/lib/Analysis/CostModel.cpp b/src/llvm-project/llvm/lib/Analysis/CostModel.cpp
index 19c307b..83b7d5c 100644
--- a/src/llvm-project/llvm/lib/Analysis/CostModel.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/CostModel.cpp
@@ -107,7 +107,7 @@
       if (auto CostVal = Cost.getValue())
         OS << "Cost Model: Found an estimated cost of " << *CostVal;
       else
-        OS << "Cost Model: Unknown cost";
+        OS << "Cost Model: Invalid cost";
 
       OS << " for instruction: " << Inst << "\n";
     }
diff --git a/src/llvm-project/llvm/lib/Analysis/DDGPrinter.cpp b/src/llvm-project/llvm/lib/Analysis/DDGPrinter.cpp
index 51bd548..0d5a936 100644
--- a/src/llvm-project/llvm/lib/Analysis/DDGPrinter.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/DDGPrinter.cpp
@@ -42,7 +42,7 @@
   errs() << "Writing '" << Filename << "'...";
 
   std::error_code EC;
-  raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
+  raw_fd_ostream File(Filename, EC, sys::fs::OF_Text);
 
   if (!EC)
     // We only provide the constant version of the DOTGraphTrait specialization,
diff --git a/src/llvm-project/llvm/lib/Analysis/Delinearization.cpp b/src/llvm-project/llvm/lib/Analysis/Delinearization.cpp
index 87a41bb..448e970 100644
--- a/src/llvm-project/llvm/lib/Analysis/Delinearization.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/Delinearization.cpp
@@ -59,19 +59,17 @@
 void printDelinearization(raw_ostream &O, Function *F, LoopInfo *LI,
                           ScalarEvolution *SE) {
   O << "Delinearization on function " << F->getName() << ":\n";
-  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
-    Instruction *Inst = &(*I);
-
+  for (Instruction &Inst : instructions(F)) {
     // Only analyze loads and stores.
-    if (!isa<StoreInst>(Inst) && !isa<LoadInst>(Inst) &&
-        !isa<GetElementPtrInst>(Inst))
+    if (!isa<StoreInst>(&Inst) && !isa<LoadInst>(&Inst) &&
+        !isa<GetElementPtrInst>(&Inst))
       continue;
 
-    const BasicBlock *BB = Inst->getParent();
+    const BasicBlock *BB = Inst.getParent();
     // Delinearize the memory access as analyzed in all the surrounding loops.
     // Do not analyze memory accesses outside loops.
     for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
-      const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(Inst), L);
+      const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(&Inst), L);
 
       const SCEVUnknown *BasePointer =
           dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFn));
@@ -81,12 +79,12 @@
       AccessFn = SE->getMinusSCEV(AccessFn, BasePointer);
 
       O << "\n";
-      O << "Inst:" << *Inst << "\n";
+      O << "Inst:" << Inst << "\n";
       O << "In Loop with Header: " << L->getHeader()->getName() << "\n";
       O << "AccessFunction: " << *AccessFn << "\n";
 
       SmallVector<const SCEV *, 3> Subscripts, Sizes;
-      SE->delinearize(AccessFn, Subscripts, Sizes, SE->getElementSize(Inst));
+      SE->delinearize(AccessFn, Subscripts, Sizes, SE->getElementSize(&Inst));
       if (Subscripts.size() == 0 || Sizes.size() == 0 ||
           Subscripts.size() != Sizes.size()) {
         O << "failed to delinearize\n";
diff --git a/src/llvm-project/llvm/lib/Analysis/DemandedBits.cpp b/src/llvm-project/llvm/lib/Analysis/DemandedBits.cpp
index dd11b0b..ca6d58f 100644
--- a/src/llvm-project/llvm/lib/Analysis/DemandedBits.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/DemandedBits.cpp
@@ -80,7 +80,7 @@
 
 static bool isAlwaysLive(Instruction *I) {
   return I->isTerminator() || isa<DbgInfoIntrinsic>(I) || I->isEHPad() ||
-         I->mayHaveSideEffects() || !I->willReturn();
+         I->mayHaveSideEffects();
 }
 
 void DemandedBits::determineLiveOperandBits(
@@ -452,6 +452,33 @@
       DL.getTypeSizeInBits(I->getType()->getScalarType()));
 }
 
+APInt DemandedBits::getDemandedBits(Use *U) {
+  Type *T = (*U)->getType();
+  Instruction *UserI = cast<Instruction>(U->getUser());
+  const DataLayout &DL = UserI->getModule()->getDataLayout();
+  unsigned BitWidth = DL.getTypeSizeInBits(T->getScalarType());
+
+  // We only track integer uses; everything else produces a mask with all
+  // bits set.
+  if (!T->isIntOrIntVectorTy())
+    return APInt::getAllOnesValue(BitWidth);
+
+  if (isUseDead(U))
+    return APInt(BitWidth, 0);
+
+  performAnalysis();
+
+  APInt AOut = getDemandedBits(UserI);
+  APInt AB = APInt::getAllOnesValue(BitWidth);
+  KnownBits Known, Known2;
+  bool KnownBitsComputed = false;
+
+  determineLiveOperandBits(UserI, *U, U->getOperandNo(), AOut, AB, Known,
+                           Known2, KnownBitsComputed);
+
+  return AB;
+}
+
 bool DemandedBits::isInstructionDead(Instruction *I) {
   performAnalysis();
 
@@ -485,10 +512,24 @@
 }
 
 void DemandedBits::print(raw_ostream &OS) {
+  auto PrintDB = [&](const Instruction *I, const APInt &A, Value *V = nullptr) {
+    OS << "DemandedBits: 0x" << Twine::utohexstr(A.getLimitedValue())
+       << " for ";
+    if (V) {
+      V->printAsOperand(OS, false);
+      OS << " in ";
+    }
+    OS << *I << '\n';
+  };
+
   performAnalysis();
   for (auto &KV : AliveBits) {
-    OS << "DemandedBits: 0x" << Twine::utohexstr(KV.second.getLimitedValue())
-       << " for " << *KV.first << '\n';
+    Instruction *I = KV.first;
+    PrintDB(I, KV.second);
+
+    for (Use &OI : I->operands()) {
+      PrintDB(I, getDemandedBits(&OI), OI);
+    }
   }
 }
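
A minimal consumer sketch for the per-use query introduced above (the helper name is hypothetical; only the DemandedBits::getDemandedBits(Use *) API from this patch is assumed):

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Report whether some operand use of I contributes no bits to the result;
// the new query returns an all-zero mask for dead uses.
static bool hasDeadOperandUse(DemandedBits &DB, Instruction *I) {
  for (Use &U : I->operands())
    if (DB.getDemandedBits(&U).isNullValue())
      return true;
  return false;
}
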
 
diff --git a/src/llvm-project/llvm/lib/Analysis/DependenceAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/DependenceAnalysis.cpp
index c2c6113..9564cfb 100644
--- a/src/llvm-project/llvm/lib/Analysis/DependenceAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -657,8 +657,8 @@
       MemoryLocation::getBeforeOrAfter(LocA.Ptr, LocA.AATags);
   MemoryLocation LocBS =
       MemoryLocation::getBeforeOrAfter(LocB.Ptr, LocB.AATags);
-  if (AA->alias(LocAS, LocBS) == NoAlias)
-    return NoAlias;
+  if (AA->isNoAlias(LocAS, LocBS))
+    return AliasResult::NoAlias;
 
   // Check the underlying objects are the same
   const Value *AObj = getUnderlyingObject(LocA.Ptr);
@@ -666,16 +666,16 @@
 
   // If the underlying objects are the same, they must alias
   if (AObj == BObj)
-    return MustAlias;
+    return AliasResult::MustAlias;
 
   // We may have hit the recursion limit for underlying objects, or have
   // underlying objects where we don't know they will alias.
   if (!isIdentifiedObject(AObj) || !isIdentifiedObject(BObj))
-    return MayAlias;
+    return AliasResult::MayAlias;
 
   // Otherwise we know the objects are distinct identified objects, so they
   // cannot alias.
-  return NoAlias;
+  return AliasResult::NoAlias;
 }
 
 
@@ -1430,8 +1430,6 @@
   if (R != 0)
     return true; // gcd doesn't divide Delta, no dependence
   Q = Delta.sdiv(G);
-  X *= Q;
-  Y *= Q;
   return false;
 }
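
For reference, the identity findGCD relies on is extended Euclid: it returns G = gcd(A, B) together with X and Y such that A*X + B*Y = G. The hunk above stops pre-scaling X and Y by Delta/G inside findGCD; the SIV/RDIV callers below now compute TX = X*(Delta/G) and TY = Y*(Delta/G) themselves. A standalone sketch (illustrative signature, not findGCD's):

#include <tuple>

// Returns (G, X, Y) with A*X + B*Y = G = gcd(A, B).
static std::tuple<long, long, long> extendedGCD(long A, long B) {
  if (B == 0)
    return {A, 1, 0}; // gcd(A, 0) = A = A*1 + B*0
  auto [G, X1, Y1] = extendedGCD(B, A % B);
  // G = B*X1 + (A - (A/B)*B)*Y1 = A*Y1 + B*(X1 - (A/B)*Y1)
  return {G, Y1, X1 - (A / B) * Y1};
}
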
 
@@ -1465,17 +1463,21 @@
 // When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*i],
 // where i is an induction variable, c1 and c2 are loop invariant, and a1
 // and a2 are constant, we can solve it exactly using an algorithm developed
-// by Banerjee and Wolfe. See Section 2.5.3 in
+// by Banerjee and Wolfe. See Algorithm 6.2.1 (case 2.5) in:
 //
-//        Optimizing Supercompilers for Supercomputers
-//        Michael Wolfe
-//        MIT Press, 1989
+//        Dependence Analysis for Supercomputing
+//        Utpal Banerjee
+//        Kluwer Academic Publishers, 1988
 //
 // It's slower than the specialized tests (strong SIV, weak-zero SIV, etc),
 // so use them if possible. They're also a bit better with symbolics and,
 // in the case of the strong SIV test, can compute Distances.
 //
 // Return true if dependence disproved.
+//
+// This is a modified version of the original Banerjee algorithm. The original
+// only tested whether Dst depends on Src. This algorithm extends that and
+// returns all the dependencies that exist between Dst and Src.
 bool DependenceInfo::exactSIVtest(const SCEV *SrcCoeff, const SCEV *DstCoeff,
                                   const SCEV *SrcConst, const SCEV *DstConst,
                                   const Loop *CurLoop, unsigned Level,
@@ -1492,8 +1494,8 @@
   Result.Consistent = false;
   const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst);
   LLVM_DEBUG(dbgs() << "\t    Delta = " << *Delta << "\n");
-  NewConstraint.setLine(SrcCoeff, SE->getNegativeSCEV(DstCoeff),
-                        Delta, CurLoop);
+  NewConstraint.setLine(SrcCoeff, SE->getNegativeSCEV(DstCoeff), Delta,
+                        CurLoop);
   const SCEVConstant *ConstDelta = dyn_cast<SCEVConstant>(Delta);
   const SCEVConstant *ConstSrcCoeff = dyn_cast<SCEVConstant>(SrcCoeff);
   const SCEVConstant *ConstDstCoeff = dyn_cast<SCEVConstant>(DstCoeff);
@@ -1504,8 +1506,9 @@
   APInt G, X, Y;
   APInt AM = ConstSrcCoeff->getAPInt();
   APInt BM = ConstDstCoeff->getAPInt();
+  APInt CM = ConstDelta->getAPInt();
   unsigned Bits = AM.getBitWidth();
-  if (findGCD(Bits, AM, BM, ConstDelta->getAPInt(), G, X, Y)) {
+  if (findGCD(Bits, AM, BM, CM, G, X, Y)) {
     // gcd doesn't divide Delta, no dependence
     ++ExactSIVindependence;
     ++ExactSIVsuccesses;
@@ -1516,55 +1519,73 @@
 
   // since SCEV construction normalizes, LM = 0
   APInt UM(Bits, 1, true);
-  bool UMvalid = false;
+  bool UMValid = false;
   // UM is perhaps unavailable, let's check
   if (const SCEVConstant *CUB =
-      collectConstantUpperBound(CurLoop, Delta->getType())) {
+          collectConstantUpperBound(CurLoop, Delta->getType())) {
     UM = CUB->getAPInt();
     LLVM_DEBUG(dbgs() << "\t    UM = " << UM << "\n");
-    UMvalid = true;
+    UMValid = true;
   }
 
   APInt TU(APInt::getSignedMaxValue(Bits));
   APInt TL(APInt::getSignedMinValue(Bits));
+  APInt TC = CM.sdiv(G);
+  APInt TX = X * TC;
+  APInt TY = Y * TC;
+  LLVM_DEBUG(dbgs() << "\t    TC = " << TC << "\n");
+  LLVM_DEBUG(dbgs() << "\t    TX = " << TX << "\n");
+  LLVM_DEBUG(dbgs() << "\t    TY = " << TY << "\n");
 
-  // test(BM/G, LM-X) and test(-BM/G, X-UM)
-  APInt TMUL = BM.sdiv(G);
-  if (TMUL.sgt(0)) {
-    TL = APIntOps::smax(TL, ceilingOfQuotient(-X, TMUL));
-    LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
-    if (UMvalid) {
-      TU = APIntOps::smin(TU, floorOfQuotient(UM - X, TMUL));
-      LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
+  SmallVector<APInt, 2> TLVec, TUVec;
+  APInt TB = BM.sdiv(G);
+  if (TB.sgt(0)) {
+    TLVec.push_back(ceilingOfQuotient(-TX, TB));
+    LLVM_DEBUG(dbgs() << "\t    Possible TL = " << TLVec.back() << "\n");
+    // New bound check - modification to Banerjee's e3 check
+    if (UMValid) {
+      TUVec.push_back(floorOfQuotient(UM - TX, TB));
+      LLVM_DEBUG(dbgs() << "\t    Possible TU = " << TUVec.back() << "\n");
     }
-  }
-  else {
-    TU = APIntOps::smin(TU, floorOfQuotient(-X, TMUL));
-    LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
-    if (UMvalid) {
-      TL = APIntOps::smax(TL, ceilingOfQuotient(UM - X, TMUL));
-      LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
+  } else {
+    TUVec.push_back(floorOfQuotient(-TX, TB));
+    LLVM_DEBUG(dbgs() << "\t    Possible TU = " << TUVec.back() << "\n");
+    // New bound check - modification to Banerjee's e3 check
+    if (UMValid) {
+      TLVec.push_back(ceilingOfQuotient(UM - TX, TB));
+      LLVM_DEBUG(dbgs() << "\t    Possible TL = " << TLVec.back() << "\n");
     }
   }
 
-  // test(AM/G, LM-Y) and test(-AM/G, Y-UM)
-  TMUL = AM.sdiv(G);
-  if (TMUL.sgt(0)) {
-    TL = APIntOps::smax(TL, ceilingOfQuotient(-Y, TMUL));
-    LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
-    if (UMvalid) {
-      TU = APIntOps::smin(TU, floorOfQuotient(UM - Y, TMUL));
-      LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
+  APInt TA = AM.sdiv(G);
+  if (TA.sgt(0)) {
+    if (UMValid) {
+      TUVec.push_back(floorOfQuotient(UM - TY, TA));
+      LLVM_DEBUG(dbgs() << "\t    Possible TU = " << TUVec.back() << "\n");
     }
-  }
-  else {
-    TU = APIntOps::smin(TU, floorOfQuotient(-Y, TMUL));
-    LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
-    if (UMvalid) {
-      TL = APIntOps::smax(TL, ceilingOfQuotient(UM - Y, TMUL));
-      LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
+    // New bound check - modification to Banerjee's e3 check
+    TLVec.push_back(ceilingOfQuotient(-TY, TA));
+    LLVM_DEBUG(dbgs() << "\t    Possible TL = " << TLVec.back() << "\n");
+  } else {
+    if (UMValid) {
+      TLVec.push_back(ceilingOfQuotient(UM - TY, TA));
+      LLVM_DEBUG(dbgs() << "\t    Possible TL = " << TLVec.back() << "\n");
     }
+    // New bound check - modification to Banerjee's e3 check
+    TUVec.push_back(floorOfQuotient(-TY, TA));
+    LLVM_DEBUG(dbgs() << "\t    Possible TU = " << TUVec.back() << "\n");
   }
+
+  LLVM_DEBUG(dbgs() << "\t    TA = " << TA << "\n");
+  LLVM_DEBUG(dbgs() << "\t    TB = " << TB << "\n");
+
+  if (TLVec.empty() || TUVec.empty())
+    return false;
+  TL = APIntOps::smax(TLVec.front(), TLVec.back());
+  TU = APIntOps::smin(TUVec.front(), TUVec.back());
+  LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
+  LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
+
   if (TL.sgt(TU)) {
     ++ExactSIVindependence;
     ++ExactSIVsuccesses;
@@ -1573,77 +1594,42 @@
 
   // explore directions
   unsigned NewDirection = Dependence::DVEntry::NONE;
-
-  // less than
-  APInt SaveTU(TU); // save these
-  APInt SaveTL(TL);
-  LLVM_DEBUG(dbgs() << "\t    exploring LT direction\n");
-  TMUL = AM - BM;
-  if (TMUL.sgt(0)) {
-    TL = APIntOps::smax(TL, ceilingOfQuotient(X - Y + 1, TMUL));
-    LLVM_DEBUG(dbgs() << "\t\t    TL = " << TL << "\n");
-  }
-  else {
-    TU = APIntOps::smin(TU, floorOfQuotient(X - Y + 1, TMUL));
-    LLVM_DEBUG(dbgs() << "\t\t    TU = " << TU << "\n");
-  }
-  if (TL.sle(TU)) {
-    NewDirection |= Dependence::DVEntry::LT;
-    ++ExactSIVsuccesses;
+  APInt LowerDistance, UpperDistance;
+  if (TA.sgt(TB)) {
+    LowerDistance = (TY - TX) + (TA - TB) * TL;
+    UpperDistance = (TY - TX) + (TA - TB) * TU;
+  } else {
+    LowerDistance = (TY - TX) + (TA - TB) * TU;
+    UpperDistance = (TY - TX) + (TA - TB) * TL;
   }
 
-  // equal
-  TU = SaveTU; // restore
-  TL = SaveTL;
-  LLVM_DEBUG(dbgs() << "\t    exploring EQ direction\n");
-  if (TMUL.sgt(0)) {
-    TL = APIntOps::smax(TL, ceilingOfQuotient(X - Y, TMUL));
-    LLVM_DEBUG(dbgs() << "\t\t    TL = " << TL << "\n");
-  }
-  else {
-    TU = APIntOps::smin(TU, floorOfQuotient(X - Y, TMUL));
-    LLVM_DEBUG(dbgs() << "\t\t    TU = " << TU << "\n");
-  }
-  TMUL = BM - AM;
-  if (TMUL.sgt(0)) {
-    TL = APIntOps::smax(TL, ceilingOfQuotient(Y - X, TMUL));
-    LLVM_DEBUG(dbgs() << "\t\t    TL = " << TL << "\n");
-  }
-  else {
-    TU = APIntOps::smin(TU, floorOfQuotient(Y - X, TMUL));
-    LLVM_DEBUG(dbgs() << "\t\t    TU = " << TU << "\n");
-  }
-  if (TL.sle(TU)) {
+  LLVM_DEBUG(dbgs() << "\t    LowerDistance = " << LowerDistance << "\n");
+  LLVM_DEBUG(dbgs() << "\t    UpperDistance = " << UpperDistance << "\n");
+
+  APInt Zero(Bits, 0, true);
+  if (LowerDistance.sle(Zero) && UpperDistance.sge(Zero)) {
     NewDirection |= Dependence::DVEntry::EQ;
     ++ExactSIVsuccesses;
   }
-
-  // greater than
-  TU = SaveTU; // restore
-  TL = SaveTL;
-  LLVM_DEBUG(dbgs() << "\t    exploring GT direction\n");
-  if (TMUL.sgt(0)) {
-    TL = APIntOps::smax(TL, ceilingOfQuotient(Y - X + 1, TMUL));
-    LLVM_DEBUG(dbgs() << "\t\t    TL = " << TL << "\n");
-  }
-  else {
-    TU = APIntOps::smin(TU, floorOfQuotient(Y - X + 1, TMUL));
-    LLVM_DEBUG(dbgs() << "\t\t    TU = " << TU << "\n");
-  }
-  if (TL.sle(TU)) {
+  if (LowerDistance.slt(0)) {
     NewDirection |= Dependence::DVEntry::GT;
     ++ExactSIVsuccesses;
   }
+  if (UpperDistance.sgt(0)) {
+    NewDirection |= Dependence::DVEntry::LT;
+    ++ExactSIVsuccesses;
+  }
 
   // finished
   Result.DV[Level].Direction &= NewDirection;
   if (Result.DV[Level].Direction == Dependence::DVEntry::NONE)
     ++ExactSIVindependence;
+  LLVM_DEBUG(dbgs() << "\t    Result = ");
+  LLVM_DEBUG(Result.dump(dbgs()));
   return Result.DV[Level].Direction == Dependence::DVEntry::NONE;
 }
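
A worked numeric check of the distance-based direction logic above, with made-up values for the TA.sgt(TB) branch: the dependence distance over t in [TL, TU] is d(t) = (TY - TX) + (TA - TB)*t, so its extremes occur at TL and TU.

#include <cstdio>

int main() {
  long TA = 2, TB = 1, TX = 0, TY = 3, TL = 0, TU = 5;
  long Lower = (TY - TX) + (TA - TB) * TL; // 3
  long Upper = (TY - TX) + (TA - TB) * TU; // 8
  bool EQ = Lower <= 0 && Upper >= 0;      // 0 not in [3, 8] -> no EQ
  bool GT = Lower < 0;                     // no
  bool LT = Upper > 0;                     // yes: only LT survives
  std::printf("EQ=%d GT=%d LT=%d\n", EQ, GT, LT);
  return 0;
}
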
 
 
-
 // Return true if the divisor evenly divides the dividend.
 static
 bool isRemainderZero(const SCEVConstant *Dividend,
@@ -1903,8 +1889,9 @@
   APInt G, X, Y;
   APInt AM = ConstSrcCoeff->getAPInt();
   APInt BM = ConstDstCoeff->getAPInt();
+  APInt CM = ConstDelta->getAPInt();
   unsigned Bits = AM.getBitWidth();
-  if (findGCD(Bits, AM, BM, ConstDelta->getAPInt(), G, X, Y)) {
+  if (findGCD(Bits, AM, BM, CM, G, X, Y)) {
     // gcd doesn't divide Delta, no dependence
     ++ExactRDIVindependence;
     return true;
@@ -1917,7 +1904,7 @@
   bool SrcUMvalid = false;
   // SrcUM is perhaps unavailable, let's check
   if (const SCEVConstant *UpperBound =
-      collectConstantUpperBound(SrcLoop, Delta->getType())) {
+          collectConstantUpperBound(SrcLoop, Delta->getType())) {
     SrcUM = UpperBound->getAPInt();
     LLVM_DEBUG(dbgs() << "\t    SrcUM = " << SrcUM << "\n");
     SrcUMvalid = true;
@@ -1927,7 +1914,7 @@
   bool DstUMvalid = false;
   // UM is perhaps unavailable, let's check
   if (const SCEVConstant *UpperBound =
-      collectConstantUpperBound(DstLoop, Delta->getType())) {
+          collectConstantUpperBound(DstLoop, Delta->getType())) {
     DstUM = UpperBound->getAPInt();
     LLVM_DEBUG(dbgs() << "\t    DstUM = " << DstUM << "\n");
     DstUMvalid = true;
@@ -1935,44 +1922,59 @@
 
   APInt TU(APInt::getSignedMaxValue(Bits));
   APInt TL(APInt::getSignedMinValue(Bits));
+  APInt TC = CM.sdiv(G);
+  APInt TX = X * TC;
+  APInt TY = Y * TC;
+  LLVM_DEBUG(dbgs() << "\t    TC = " << TC << "\n");
+  LLVM_DEBUG(dbgs() << "\t    TX = " << TX << "\n");
+  LLVM_DEBUG(dbgs() << "\t    TY = " << TY << "\n");
 
-  // test(BM/G, LM-X) and test(-BM/G, X-UM)
-  APInt TMUL = BM.sdiv(G);
-  if (TMUL.sgt(0)) {
-    TL = APIntOps::smax(TL, ceilingOfQuotient(-X, TMUL));
-    LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
+  SmallVector<APInt, 2> TLVec, TUVec;
+  APInt TB = BM.sdiv(G);
+  if (TB.sgt(0)) {
+    TLVec.push_back(ceilingOfQuotient(-TX, TB));
+    LLVM_DEBUG(dbgs() << "\t    Possible TL = " << TLVec.back() << "\n");
     if (SrcUMvalid) {
-      TU = APIntOps::smin(TU, floorOfQuotient(SrcUM - X, TMUL));
-      LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
+      TUVec.push_back(floorOfQuotient(SrcUM - TX, TB));
+      LLVM_DEBUG(dbgs() << "\t    Possible TU = " << TUVec.back() << "\n");
     }
-  }
-  else {
-    TU = APIntOps::smin(TU, floorOfQuotient(-X, TMUL));
-    LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
+  } else {
+    TUVec.push_back(floorOfQuotient(-TX, TB));
+    LLVM_DEBUG(dbgs() << "\t    Possible TU = " << TUVec.back() << "\n");
     if (SrcUMvalid) {
-      TL = APIntOps::smax(TL, ceilingOfQuotient(SrcUM - X, TMUL));
-      LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
+      TLVec.push_back(ceilingOfQuotient(SrcUM - TX, TB));
+      LLVM_DEBUG(dbgs() << "\t    Possible TL = " << TLVec.back() << "\n");
     }
   }
 
-  // test(AM/G, LM-Y) and test(-AM/G, Y-UM)
-  TMUL = AM.sdiv(G);
-  if (TMUL.sgt(0)) {
-    TL = APIntOps::smax(TL, ceilingOfQuotient(-Y, TMUL));
-    LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
+  APInt TA = AM.sdiv(G);
+  if (TA.sgt(0)) {
+    TLVec.push_back(ceilingOfQuotient(-TY, TA));
+    LLVM_DEBUG(dbgs() << "\t    Possible TL = " << TLVec.back() << "\n");
     if (DstUMvalid) {
-      TU = APIntOps::smin(TU, floorOfQuotient(DstUM - Y, TMUL));
-      LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
+      TUVec.push_back(floorOfQuotient(DstUM - TY, TA));
+      LLVM_DEBUG(dbgs() << "\t    Possible TU = " << TUVec.back() << "\n");
+    }
+  } else {
+    TUVec.push_back(floorOfQuotient(-TY, TA));
+    LLVM_DEBUG(dbgs() << "\t    Possible TU = " << TUVec.back() << "\n");
+    if (DstUMvalid) {
+      TLVec.push_back(ceilingOfQuotient(DstUM - TY, TA));
+      LLVM_DEBUG(dbgs() << "\t    Possible TL = " << TLVec.back() << "\n");
     }
   }
-  else {
-    TU = APIntOps::smin(TU, floorOfQuotient(-Y, TMUL));
-    LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
-    if (DstUMvalid) {
-      TL = APIntOps::smax(TL, ceilingOfQuotient(DstUM - Y, TMUL));
-      LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
-    }
-  }
+
+  if (TLVec.empty() || TUVec.empty())
+    return false;
+
+  LLVM_DEBUG(dbgs() << "\t    TA = " << TA << "\n");
+  LLVM_DEBUG(dbgs() << "\t    TB = " << TB << "\n");
+
+  TL = APIntOps::smax(TLVec.front(), TLVec.back());
+  TU = APIntOps::smin(TUVec.front(), TUVec.back());
+  LLVM_DEBUG(dbgs() << "\t    TL = " << TL << "\n");
+  LLVM_DEBUG(dbgs() << "\t    TU = " << TU << "\n");
+
   if (TL.sgt(TU))
     ++ExactRDIVindependence;
   return TL.sgt(TU);
@@ -3302,16 +3304,6 @@
     const SCEV *DstAccessFn, SmallVectorImpl<const SCEV *> &SrcSubscripts,
     SmallVectorImpl<const SCEV *> &DstSubscripts) {
 
-  // In general we cannot safely assume that the subscripts recovered from GEPs
-  // are in the range of values defined for their corresponding array
-  // dimensions. For example some C language usage/interpretation make it
-  // impossible to verify this at compile-time. As such we give up here unless
-  // we can assume that the subscripts do not overlap into neighboring
-  // dimensions and that the number of dimensions matches the number of
-  // subscripts being recovered.
-  if (!DisableDelinearizationChecks)
-    return false;
-
   Value *SrcPtr = getLoadStorePointerOperand(Src);
   Value *DstPtr = getLoadStorePointerOperand(Dst);
   const SCEVUnknown *SrcBase =
@@ -3350,22 +3342,55 @@
 
   // Check that for identical base pointers we do not miss index offsets
   // that have been added before this GEP is applied.
-  if (SrcBasePtr == SrcBase->getValue() && DstBasePtr == DstBase->getValue()) {
-    assert(SrcSubscripts.size() == DstSubscripts.size() &&
-           SrcSubscripts.size() == SrcSizes.size() + 1 &&
-           "Expected equal number of entries in the list of sizes and "
-           "subscripts.");
-    LLVM_DEBUG({
-      dbgs() << "Delinearized subscripts of fixed-size array\n"
-             << "SrcGEP:" << *SrcGEP << "\n"
-             << "DstGEP:" << *DstGEP << "\n";
-    });
-    return true;
+  if (SrcBasePtr != SrcBase->getValue() || DstBasePtr != DstBase->getValue()) {
+    SrcSubscripts.clear();
+    DstSubscripts.clear();
+    return false;
   }
 
-  SrcSubscripts.clear();
-  DstSubscripts.clear();
-  return false;
+  assert(SrcSubscripts.size() == DstSubscripts.size() &&
+         SrcSubscripts.size() == SrcSizes.size() + 1 &&
+         "Expected equal number of entries in the list of sizes and "
+         "subscripts.");
+
+  // In general we cannot safely assume that the subscripts recovered from GEPs
+  // are in the range of values defined for their corresponding array
+  // dimensions. For example, some C language usage/interpretation makes it
+  // impossible to verify this at compile-time. As such we can only
+  // delinearize if each subscript is known to be non-negative and less than
+  // the range of its dimension.
+  if (!DisableDelinearizationChecks) {
+    auto AllIndicesInRange = [&](SmallVector<int, 4> &DimensionSizes,
+                                  SmallVectorImpl<const SCEV *> &Subscripts,
+                                  Value *Ptr) {
+      size_t SSize = Subscripts.size();
+      for (size_t I = 1; I < SSize; ++I) {
+        const SCEV *S = Subscripts[I];
+        if (!isKnownNonNegative(S, Ptr))
+          return false;
+        if (auto *SType = dyn_cast<IntegerType>(S->getType())) {
+          const SCEV *Range = SE->getConstant(
+              ConstantInt::get(SType, DimensionSizes[I - 1], false));
+          if (!isKnownLessThan(S, Range))
+            return false;
+        }
+      }
+      return true;
+    };
+
+    if (!AllIndicesInRange(SrcSizes, SrcSubscripts, SrcPtr) ||
+        !AllIndicesInRange(DstSizes, DstSubscripts, DstPtr)) {
+      SrcSubscripts.clear();
+      DstSubscripts.clear();
+      return false;
+    }
+  }
+  LLVM_DEBUG({
+    dbgs() << "Delinearized subscripts of fixed-size array\n"
+           << "SrcGEP:" << *SrcGEP << "\n"
+           << "DstGEP:" << *DstGEP << "\n";
+  });
+  return true;
 }
 
 bool DependenceInfo::tryDelinearizeParametricSize(
@@ -3501,16 +3526,16 @@
   switch (underlyingObjectsAlias(AA, F->getParent()->getDataLayout(),
                                  MemoryLocation::get(Dst),
                                  MemoryLocation::get(Src))) {
-  case MayAlias:
-  case PartialAlias:
+  case AliasResult::MayAlias:
+  case AliasResult::PartialAlias:
     // cannot analyse objects if we don't understand their aliasing.
     LLVM_DEBUG(dbgs() << "can't analyze may or partial alias\n");
     return std::make_unique<Dependence>(Src, Dst);
-  case NoAlias:
+  case AliasResult::NoAlias:
     // If the objects noalias, they are distinct, accesses are independent.
     LLVM_DEBUG(dbgs() << "no alias\n");
     return nullptr;
-  case MustAlias:
+  case AliasResult::MustAlias:
     break; // The underlying objects alias; test accesses for dependence.
   }
 
@@ -3528,6 +3553,16 @@
   const SCEV *DstSCEV = SE->getSCEV(DstPtr);
   LLVM_DEBUG(dbgs() << "    SrcSCEV = " << *SrcSCEV << "\n");
   LLVM_DEBUG(dbgs() << "    DstSCEV = " << *DstSCEV << "\n");
+  if (SE->getPointerBase(SrcSCEV) != SE->getPointerBase(DstSCEV)) {
+    // If two pointers have different bases, trying to analyze indexes won't
+    // work; we can't compare them to each other. This can happen, for example,
+    // if one is produced by an LCSSA PHI node.
+    //
+    // We check this upfront so we don't crash in cases where getMinusSCEV()
+    // returns a SCEVCouldNotCompute.
+    LLVM_DEBUG(dbgs() << "can't analyze SCEV with different pointer base\n");
+    return std::make_unique<Dependence>(Src, Dst);
+  }
   Pair[0].Src = SrcSCEV;
   Pair[0].Dst = DstSCEV;
 
@@ -3914,9 +3949,9 @@
   assert(isLoadOrStore(Dst));
   Value *SrcPtr = getLoadStorePointerOperand(Src);
   Value *DstPtr = getLoadStorePointerOperand(Dst);
-  assert(underlyingObjectsAlias(AA, F->getParent()->getDataLayout(),
-                                MemoryLocation::get(Dst),
-                                MemoryLocation::get(Src)) == MustAlias);
+  assert(underlyingObjectsAlias(
+             AA, F->getParent()->getDataLayout(), MemoryLocation::get(Dst),
+             MemoryLocation::get(Src)) == AliasResult::MustAlias);
 
   // establish loop nesting levels
   establishNestingLevels(Src, Dst);
diff --git a/src/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/src/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index e138e82..ecfefa3 100644
--- a/src/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -160,7 +160,9 @@
     *CurrentNativeSize += Change;
   }
   void resetNativeSize(Function *F) {
-    FAM.invalidate<InlineSizeEstimatorAnalysis>(*F);
+    PreservedAnalyses PA = PreservedAnalyses::all();
+    PA.abandon<InlineSizeEstimatorAnalysis>();
+    FAM.invalidate(*F, PA);
   }
 
   std::unique_ptr<MLInlineAdvice>
@@ -351,24 +353,22 @@
   size_t CurrentFeature = 0;
   for (; CurrentFeature < NumberOfFeatures; ++CurrentFeature) {
     int64_t F = ModelRunner.getFeature(CurrentFeature);
-    L->logTensorValue(CurrentFeature, &F);
+    L->logInt64Value(CurrentFeature, &F);
   }
 
   for (size_t I = 1; I < OutputCount; ++I) {
     const auto &Result = *MUTR->lastEvaluationResult();
-    auto &Spec = MUTR->outputLoggedFeatureSpecs()[I].Spec;
     const char *RawData =
         reinterpret_cast<const char *>(Result.getUntypedTensorValue(I));
-    L->logTensorValue(CurrentFeature, RawData,
-                      Spec.getElementCount() * Spec.getElementByteSize());
+    L->logSpecifiedTensorValue(CurrentFeature, RawData);
     ++CurrentFeature;
   }
 
   assert(CurrentFeature == DefaultDecisionPos);
-  L->logTensorValue(DefaultDecisionPos, &Event.DefaultDecision);
-  L->logTensorValue(DecisionPos, &Event.AdvisedDecision);
+  L->logInt64Value(DefaultDecisionPos, &Event.DefaultDecision);
+  L->logInt64Value(DecisionPos, &Event.AdvisedDecision);
   if (InlineSizeEstimatorAnalysis::isEvaluatorRequested())
-    L->logReward(Event.Reward);
+    L->logInt64Reward(Event.Reward);
 
   // For debugging / later use
   Effects.push_back(Event.Effect);
diff --git a/src/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp
index 287c132..3634526 100644
--- a/src/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/DivergenceAnalysis.cpp
@@ -31,10 +31,10 @@
 //   Ralf Karrenberg and Sebastian Hack
 //   CC '12
 //
-// This DivergenceAnalysis implementation is generic in the sense that it does
+// This implementation is generic in the sense that it does
 // not itself identify original sources of divergence.
 // Instead specialized adapter classes, (LoopDivergenceAnalysis) for loops and
-// (GPUDivergenceAnalysis) for GPU programs, identify the sources of divergence
+// (DivergenceAnalysis) for functions, identify the sources of divergence
 // (e.g., special variables that hold the thread ID or the iteration variable).
 //
 // The generic implementation propagates divergence to variables that are data
@@ -61,7 +61,7 @@
 // The sync dependence detection (which branch induces divergence in which join
 // points) is implemented in the SyncDependenceAnalysis.
 //
-// The current DivergenceAnalysis implementation has the following limitations:
+// The current implementation has the following limitations:
 // 1. intra-procedural. It conservatively considers the arguments of a
 //    non-kernel-entry function and the return value of a function call as
 //    divergent.
@@ -73,6 +73,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/Passes.h"
 #include "llvm/Analysis/PostDominators.h"
@@ -87,16 +88,15 @@
 
 using namespace llvm;
 
-#define DEBUG_TYPE "divergence-analysis"
+#define DEBUG_TYPE "divergence"
 
-// class DivergenceAnalysis
-DivergenceAnalysis::DivergenceAnalysis(
+DivergenceAnalysisImpl::DivergenceAnalysisImpl(
     const Function &F, const Loop *RegionLoop, const DominatorTree &DT,
     const LoopInfo &LI, SyncDependenceAnalysis &SDA, bool IsLCSSAForm)
     : F(F), RegionLoop(RegionLoop), DT(DT), LI(LI), SDA(SDA),
       IsLCSSAForm(IsLCSSAForm) {}
 
-bool DivergenceAnalysis::markDivergent(const Value &DivVal) {
+bool DivergenceAnalysisImpl::markDivergent(const Value &DivVal) {
   if (isAlwaysUniform(DivVal))
     return false;
   assert(isa<Instruction>(DivVal) || isa<Argument>(DivVal));
@@ -104,12 +104,12 @@
   return DivergentValues.insert(&DivVal).second;
 }
 
-void DivergenceAnalysis::addUniformOverride(const Value &UniVal) {
+void DivergenceAnalysisImpl::addUniformOverride(const Value &UniVal) {
   UniformOverrides.insert(&UniVal);
 }
 
-bool DivergenceAnalysis::isTemporalDivergent(const BasicBlock &ObservingBlock,
-                                             const Value &Val) const {
+bool DivergenceAnalysisImpl::isTemporalDivergent(
+    const BasicBlock &ObservingBlock, const Value &Val) const {
   const auto *Inst = dyn_cast<const Instruction>(&Val);
   if (!Inst)
     return false;
@@ -125,15 +125,15 @@
   return false;
 }
 
-bool DivergenceAnalysis::inRegion(const Instruction &I) const {
+bool DivergenceAnalysisImpl::inRegion(const Instruction &I) const {
   return I.getParent() && inRegion(*I.getParent());
 }
 
-bool DivergenceAnalysis::inRegion(const BasicBlock &BB) const {
+bool DivergenceAnalysisImpl::inRegion(const BasicBlock &BB) const {
   return (!RegionLoop && BB.getParent() == &F) || RegionLoop->contains(&BB);
 }
 
-void DivergenceAnalysis::pushUsers(const Value &V) {
+void DivergenceAnalysisImpl::pushUsers(const Value &V) {
   const auto *I = dyn_cast<const Instruction>(&V);
 
   if (I && I->isTerminator()) {
@@ -166,8 +166,8 @@
   return I;
 }
 
-void DivergenceAnalysis::analyzeTemporalDivergence(const Instruction &I,
-                                                   const Loop &OuterDivLoop) {
+void DivergenceAnalysisImpl::analyzeTemporalDivergence(
+    const Instruction &I, const Loop &OuterDivLoop) {
   if (isAlwaysUniform(I))
     return;
   if (isDivergent(I))
@@ -188,8 +188,8 @@
 
 // marks all users of loop-carried values of the loop headed by LoopHeader as
 // divergent
-void DivergenceAnalysis::analyzeLoopExitDivergence(const BasicBlock &DivExit,
-                                                   const Loop &OuterDivLoop) {
+void DivergenceAnalysisImpl::analyzeLoopExitDivergence(
+    const BasicBlock &DivExit, const Loop &OuterDivLoop) {
   // All users are in immediate exit blocks
   if (IsLCSSAForm) {
     for (const auto &Phi : DivExit.phis()) {
@@ -242,8 +242,8 @@
   } while (!TaintStack.empty());
 }
 
-void DivergenceAnalysis::propagateLoopExitDivergence(const BasicBlock &DivExit,
-                                                     const Loop &InnerDivLoop) {
+void DivergenceAnalysisImpl::propagateLoopExitDivergence(
+    const BasicBlock &DivExit, const Loop &InnerDivLoop) {
   LLVM_DEBUG(dbgs() << "\tpropLoopExitDiv " << DivExit.getName() << "\n");
 
   // Find outer-most loop that does not contain \p DivExit
@@ -265,7 +265,7 @@
 
 // this is a divergent join point - mark all phi nodes as divergent and push
 // them onto the stack.
-void DivergenceAnalysis::taintAndPushPhiNodes(const BasicBlock &JoinBlock) {
+void DivergenceAnalysisImpl::taintAndPushPhiNodes(const BasicBlock &JoinBlock) {
   LLVM_DEBUG(dbgs() << "taintAndPushPhiNodes in " << JoinBlock.getName()
                     << "\n");
 
@@ -287,7 +287,7 @@
   }
 }
 
-void DivergenceAnalysis::analyzeControlDivergence(const Instruction &Term) {
+void DivergenceAnalysisImpl::analyzeControlDivergence(const Instruction &Term) {
   LLVM_DEBUG(dbgs() << "analyzeControlDiv " << Term.getParent()->getName()
                     << "\n");
 
@@ -310,7 +310,7 @@
   }
 }
 
-void DivergenceAnalysis::compute() {
+void DivergenceAnalysisImpl::compute() {
   // Initialize worklist.
   auto DivValuesCopy = DivergentValues;
   for (const auto *DivVal : DivValuesCopy) {
@@ -330,63 +330,81 @@
   }
 }
 
-bool DivergenceAnalysis::isAlwaysUniform(const Value &V) const {
+bool DivergenceAnalysisImpl::isAlwaysUniform(const Value &V) const {
   return UniformOverrides.contains(&V);
 }
 
-bool DivergenceAnalysis::isDivergent(const Value &V) const {
+bool DivergenceAnalysisImpl::isDivergent(const Value &V) const {
   return DivergentValues.contains(&V);
 }
 
-bool DivergenceAnalysis::isDivergentUse(const Use &U) const {
+bool DivergenceAnalysisImpl::isDivergentUse(const Use &U) const {
   Value &V = *U.get();
   Instruction &I = *cast<Instruction>(U.getUser());
   return isDivergent(V) || isTemporalDivergent(*I.getParent(), V);
 }
 
-void DivergenceAnalysis::print(raw_ostream &OS, const Module *) const {
-  if (DivergentValues.empty())
-    return;
-  // iterate instructions using instructions() to ensure a deterministic order.
-  for (auto &I : instructions(F)) {
-    if (isDivergent(I))
-      OS << "DIVERGENT:" << I << '\n';
+DivergenceInfo::DivergenceInfo(Function &F, const DominatorTree &DT,
+                               const PostDominatorTree &PDT, const LoopInfo &LI,
+                               const TargetTransformInfo &TTI,
+                               bool KnownReducible)
+    : F(F), ContainsIrreducible(false) {
+  if (!KnownReducible) {
+    using RPOTraversal = ReversePostOrderTraversal<const Function *>;
+    RPOTraversal FuncRPOT(&F);
+    if (containsIrreducibleCFG<const BasicBlock *, const RPOTraversal,
+                               const LoopInfo>(FuncRPOT, LI)) {
+      ContainsIrreducible = true;
+      return;
+    }
   }
-}
-
-// class GPUDivergenceAnalysis
-GPUDivergenceAnalysis::GPUDivergenceAnalysis(Function &F,
-                                             const DominatorTree &DT,
-                                             const PostDominatorTree &PDT,
-                                             const LoopInfo &LI,
-                                             const TargetTransformInfo &TTI)
-    : SDA(DT, PDT, LI), DA(F, nullptr, DT, LI, SDA, /* LCSSA */ false) {
+  SDA = std::make_unique<SyncDependenceAnalysis>(DT, PDT, LI);
+  DA = std::make_unique<DivergenceAnalysisImpl>(F, nullptr, DT, LI, *SDA,
+                                                /* LCSSA */ false);
   for (auto &I : instructions(F)) {
     if (TTI.isSourceOfDivergence(&I)) {
-      DA.markDivergent(I);
+      DA->markDivergent(I);
     } else if (TTI.isAlwaysUniform(&I)) {
-      DA.addUniformOverride(I);
+      DA->addUniformOverride(I);
     }
   }
   for (auto &Arg : F.args()) {
     if (TTI.isSourceOfDivergence(&Arg)) {
-      DA.markDivergent(Arg);
+      DA->markDivergent(Arg);
     }
   }
 
-  DA.compute();
+  DA->compute();
 }
 
-bool GPUDivergenceAnalysis::isDivergent(const Value &val) const {
-  return DA.isDivergent(val);
+AnalysisKey DivergenceAnalysis::Key;
+
+DivergenceAnalysis::Result
+DivergenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
+  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
+  auto &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
+  auto &LI = AM.getResult<LoopAnalysis>(F);
+  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
+
+  return DivergenceInfo(F, DT, PDT, LI, TTI, /* KnownReducible = */ false);
 }
 
-bool GPUDivergenceAnalysis::isDivergentUse(const Use &use) const {
-  return DA.isDivergentUse(use);
-}
-
-void GPUDivergenceAnalysis::print(raw_ostream &OS, const Module *mod) const {
-  OS << "Divergence of kernel " << DA.getFunction().getName() << " {\n";
-  DA.print(OS, mod);
-  OS << "}\n";
+PreservedAnalyses
+DivergenceAnalysisPrinterPass::run(Function &F, FunctionAnalysisManager &FAM) {
+  auto &DI = FAM.getResult<DivergenceAnalysis>(F);
+  OS << "'Divergence Analysis' for function '" << F.getName() << "':\n";
+  if (DI.hasDivergence()) {
+    for (auto &Arg : F.args()) {
+      OS << (DI.isDivergent(Arg) ? "DIVERGENT: " : "           ");
+      OS << Arg << "\n";
+    }
+    for (const BasicBlock &BB : F) {
+      OS << "\n           " << BB.getName() << ":\n";
+      for (auto &I : BB.instructionsWithoutDebug()) {
+        OS << (DI.isDivergent(I) ? "DIVERGENT:     " : "               ");
+        OS << I << "\n";
+      }
+    }
+  }
+  return PreservedAnalyses::all();
 }
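
A hypothetical new-PM consumer of the analysis registered above, using only the DivergenceInfo API visible in this patch (hasDivergence, isDivergent):

#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

struct CountDivergentPass : PassInfoMixin<CountDivergentPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    auto &DI = AM.getResult<DivergenceAnalysis>(F);
    unsigned NumDivergent = 0;
    if (DI.hasDivergence())
      for (Instruction &I : instructions(F))
        NumDivergent += DI.isDivergent(I);
    (void)NumDivergent; // e.g. feed a statistic or a remark
    return PreservedAnalyses::all();
  }
};
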
diff --git a/src/llvm-project/llvm/lib/Analysis/DomTreeUpdater.cpp b/src/llvm-project/llvm/lib/Analysis/DomTreeUpdater.cpp
index 8ac7d9d..6e29926 100644
--- a/src/llvm-project/llvm/lib/Analysis/DomTreeUpdater.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/DomTreeUpdater.cpp
@@ -232,6 +232,7 @@
     return;
 
   if (Strategy == UpdateStrategy::Lazy) {
+    PendUpdates.reserve(PendUpdates.size() + Updates.size());
     for (const auto &U : Updates)
       if (!isSelfDominance(U))
         PendUpdates.push_back(U);
diff --git a/src/llvm-project/llvm/lib/Analysis/EHPersonalities.cpp b/src/llvm-project/llvm/lib/Analysis/EHPersonalities.cpp
index a982f26..df8b7e1 100644
--- a/src/llvm-project/llvm/lib/Analysis/EHPersonalities.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/EHPersonalities.cpp
@@ -19,9 +19,9 @@
 /// See if the given exception handling personality function is one that we
 /// understand.  If so, return a description of it; otherwise return Unknown.
 EHPersonality llvm::classifyEHPersonality(const Value *Pers) {
-  const Function *F =
-      Pers ? dyn_cast<Function>(Pers->stripPointerCasts()) : nullptr;
-  if (!F)
+  const GlobalValue *F =
+      Pers ? dyn_cast<GlobalValue>(Pers->stripPointerCasts()) : nullptr;
+  if (!F || !F->getValueType() || !F->getValueType()->isFunctionTy())
     return EHPersonality::Unknown;
   return StringSwitch<EHPersonality>(F->getName())
       .Case("__gnat_eh_personality", EHPersonality::GNU_Ada)
diff --git a/src/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp b/src/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp
index 145baf8..d00a7c9 100644
--- a/src/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -828,9 +828,9 @@
                                    AAQueryInfo &AAQI) {
   // Get the base object these pointers point to.
   const Value *UV1 =
-      getUnderlyingObject(LocA.Ptr->stripPointerCastsAndInvariantGroups());
+      getUnderlyingObject(LocA.Ptr->stripPointerCastsForAliasAnalysis());
   const Value *UV2 =
-      getUnderlyingObject(LocB.Ptr->stripPointerCastsAndInvariantGroups());
+      getUnderlyingObject(LocB.Ptr->stripPointerCastsForAliasAnalysis());
 
   // If either of the underlying values is a global, they may be non-addr-taken
   // globals, which we can answer queries about.
@@ -847,14 +847,14 @@
     // If the two pointers are derived from two different non-addr-taken
     // globals we know these can't alias.
     if (GV1 && GV2 && GV1 != GV2)
-      return NoAlias;
+      return AliasResult::NoAlias;
 
     // If one is and the other isn't, it isn't strictly safe but we can fake
     // this result if necessary for performance. This does not appear to be
     // a common problem in practice.
     if (EnableUnsafeGlobalsModRefAliasResults)
       if ((GV1 || GV2) && GV1 != GV2)
-        return NoAlias;
+        return AliasResult::NoAlias;
 
     // Check for a special case where a non-escaping global can be used to
     // conclude no-alias.
@@ -862,7 +862,7 @@
       const GlobalValue *GV = GV1 ? GV1 : GV2;
       const Value *UV = GV1 ? UV2 : UV1;
       if (isNonEscapingGlobalNoAlias(GV, UV))
-        return NoAlias;
+        return AliasResult::NoAlias;
     }
 
     // Otherwise if they are both derived from the same addr-taken global, we
@@ -893,14 +893,14 @@
   // use this to disambiguate the pointers. If the pointers are based on
   // different indirect globals they cannot alias.
   if (GV1 && GV2 && GV1 != GV2)
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   // If one is based on an indirect global and the other isn't, it isn't
   // strictly safe but we can fake this result if necessary for performance.
   // This does not appear to be a common problem in practice.
   if (EnableUnsafeGlobalsModRefAliasResults)
     if ((GV1 || GV2) && GV1 != GV2)
-      return NoAlias;
+      return AliasResult::NoAlias;
 
   return AAResultBase::alias(LocA, LocB, AAQI);
 }
@@ -925,7 +925,7 @@
         !all_of(Objects, [&](const Value *V) {
           return this->alias(MemoryLocation::getBeforeOrAfter(V),
                              MemoryLocation::getBeforeOrAfter(GV),
-                             AAQI) == NoAlias;
+                             AAQI) == AliasResult::NoAlias;
         }))
       return ConservativeResult;
 
diff --git a/src/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp b/src/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
index 25443a6..a6298af 100644
--- a/src/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/IRSimilarityIdentifier.cpp
@@ -176,8 +176,8 @@
 
   if (HaveLegalRange) {
     mapToIllegalUnsigned(It, IntegerMappingForBB, InstrListForBB, true);
-    for_each(InstrListForBB,
-             [this](IRInstructionData *ID) { this->IDL->push_back(*ID); });
+    for (IRInstructionData *ID : InstrListForBB)
+      this->IDL->push_back(*ID);
     llvm::append_range(InstrList, InstrListForBB);
     llvm::append_range(IntegerMapping, IntegerMappingForBB);
   }
@@ -702,7 +702,7 @@
 /// \param [out] CandsForRepSubstring - The vector to store the generated
 /// IRSimilarityCandidates.
 static void createCandidatesFromSuffixTree(
-    IRInstructionMapper Mapper, std::vector<IRInstructionData *> &InstrList,
+    const IRInstructionMapper &Mapper,
+    std::vector<IRInstructionData *> &InstrList,
     std::vector<unsigned> &IntegerMapping, SuffixTree::RepeatedSubstring &RS,
     std::vector<IRSimilarityCandidate> &CandsForRepSubstring) {
 
@@ -835,8 +835,8 @@
   // Iterate over the subsequences found by the Suffix Tree to create
   // IRSimilarityCandidates for each repeated subsequence and determine which
   // instances are structurally similar to one another.
-  for (auto It = ST.begin(), Et = ST.end(); It != Et; ++It) {
-    createCandidatesFromSuffixTree(Mapper, InstrList, IntegerMapping, *It,
+  for (SuffixTree::RepeatedSubstring &RS : ST) {
+    createCandidatesFromSuffixTree(Mapper, InstrList, IntegerMapping, RS,
                                    CandsForRepSubstring);
 
     if (CandsForRepSubstring.size() < 2)
@@ -891,7 +891,7 @@
 }
 
 bool IRSimilarityIdentifierWrapperPass::doInitialization(Module &M) {
-  IRSI.reset(new IRSimilarityIdentifier(M));
+  IRSI.reset(new IRSimilarityIdentifier());
   return false;
 }
 
@@ -901,8 +901,7 @@
 }
 
 bool IRSimilarityIdentifierWrapperPass::runOnModule(Module &M) {
-  // All the real work is done in the constructor for the pass.
-  IRSI.reset(new IRSimilarityIdentifier(M));
+  IRSI->findSimilarity(M);
   return false;
 }
 
@@ -910,7 +909,9 @@
 IRSimilarityIdentifier IRSimilarityAnalysis::run(Module &M,
                                                ModuleAnalysisManager &) {
 
-  return IRSimilarityIdentifier(M);
+  auto IRSI = IRSimilarityIdentifier();
+  IRSI.findSimilarity(M);
+  return IRSI;
 }
 
 PreservedAnalyses
@@ -923,11 +924,16 @@
        << CandVec.begin()->getLength() << ".  Found in: \n";
     for (IRSimilarityCandidate &Cand : CandVec) {
       OS << "  Function: " << Cand.front()->Inst->getFunction()->getName().str()
-         << ",  Basic Block: ";
+         << ", Basic Block: ";
       if (Cand.front()->Inst->getParent()->getName().str() == "")
-        OS << "(unnamed)\n";
+        OS << "(unnamed)";
       else
-        OS << Cand.front()->Inst->getParent()->getName().str() << "\n";
+        OS << Cand.front()->Inst->getParent()->getName().str();
+      OS << "\n    Start Instruction: ";
+      Cand.frontInstruction()->print(OS);
+      OS << "\n      End Instruction: ";
+      Cand.backInstruction()->print(OS);
+      OS << "\n";
     }
   }
 
diff --git a/src/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp b/src/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp
index 94a24cc..fc6051b 100644
--- a/src/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/IVDescriptors.cpp
@@ -34,6 +34,8 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/KnownBits.h"
 
+#include <set>
+
 using namespace llvm;
 using namespace llvm::PatternMatch;
 
@@ -189,8 +191,32 @@
   }
 }
 
+// Check if a given Phi node can be recognized as an ordered reduction for
+// vectorizing floating point operations without unsafe math.
+static bool checkOrderedReduction(RecurKind Kind, Instruction *ExactFPMathInst,
+                                  Instruction *Exit, PHINode *Phi) {
+  // Currently only FAdd is supported
+  if (Kind != RecurKind::FAdd)
+    return false;
+
+  if (Exit->getOpcode() != Instruction::FAdd || Exit != ExactFPMathInst)
+    return false;
+
+  // The only pattern accepted is the one in which the reduction PHI
+  // is used as one of the operands of the exit instruction
+  auto *LHS = Exit->getOperand(0);
+  auto *RHS = Exit->getOperand(1);
+  if (LHS != Phi && RHS != Phi)
+    return false;
+
+  LLVM_DEBUG(dbgs() << "LV: Found an ordered reduction: Phi: " << *Phi
+                    << ", ExitInst: " << *Exit << "\n");
+
+  return true;
+}
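
At source level, the shape checkOrderedReduction accepts is a strict in-order floating-point sum like the sketch below (assuming, as the "LV:" debug output suggests, that this feeds the loop vectorizer's in-order reduction support): the PHI for S is an operand of the exiting fadd, and without reassociation flags that fadd is the exact-FP-math instruction.

// Strict FP reduction: the fadd chain must stay in source order.
double strictSum(const double *A, int N) {
  double S = 0.0;   // start value of the reduction PHI
  for (int I = 0; I < N; ++I)
    S += A[I];      // exit instruction: fadd with the PHI as an operand
  return S;
}
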
+
 bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurKind Kind,
-                                           Loop *TheLoop, bool HasFunNoNaNAttr,
+                                           Loop *TheLoop, FastMathFlags FuncFMF,
                                            RecurrenceDescriptor &RedDes,
                                            DemandedBits *DB,
                                            AssumptionCache *AC,
@@ -301,12 +327,22 @@
     // the starting value (the Phi or an AND instruction if the Phi has been
     // type-promoted).
     if (Cur != Start) {
-      ReduxDesc = isRecurrenceInstr(Cur, Kind, ReduxDesc, HasFunNoNaNAttr);
+      ReduxDesc = isRecurrenceInstr(Cur, Kind, ReduxDesc, FuncFMF);
       if (!ReduxDesc.isRecurrence())
         return false;
       // FIXME: FMF is allowed on phi, but propagation is not handled correctly.
-      if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi)
-        FMF &= ReduxDesc.getPatternInst()->getFastMathFlags();
+      if (isa<FPMathOperator>(ReduxDesc.getPatternInst()) && !IsAPhi) {
+        FastMathFlags CurFMF = ReduxDesc.getPatternInst()->getFastMathFlags();
+        if (auto *Sel = dyn_cast<SelectInst>(ReduxDesc.getPatternInst())) {
+          // Accept FMF on either fcmp or select of a min/max idiom.
+          // TODO: This is a hack to work around the fact that FMF may not be
+          //       assigned/propagated correctly. If that problem is fixed or we
+          //       standardize on fmin/fmax via intrinsics, this can be removed.
+          if (auto *FCmp = dyn_cast<FCmpInst>(Sel->getCondition()))
+            CurFMF |= FCmp->getFastMathFlags();
+        }
+        FMF &= CurFMF;
+      }
       // Update this reduction kind if we matched a new instruction.
       // TODO: Can we eliminate the need for a 2nd InstDesc by keeping 'Kind'
       //       state accurate while processing the worklist?
@@ -406,6 +442,9 @@
   if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
     return false;
 
+  const bool IsOrdered = checkOrderedReduction(
+      Kind, ReduxDesc.getExactFPMathInst(), ExitInstruction, Phi);
+
   if (Start != Phi) {
     // If the starting value is not the same as the phi node, we speculatively
     // looked through an 'and' instruction when evaluating a potential
@@ -459,8 +498,8 @@
 
   // Save the description of this reduction variable.
   RecurrenceDescriptor RD(RdxStart, ExitInstruction, Kind, FMF,
-                          ReduxDesc.getUnsafeAlgebraInst(), RecurrenceType,
-                          IsSigned, CastInsts);
+                          ReduxDesc.getExactFPMathInst(), RecurrenceType,
+                          IsSigned, IsOrdered, CastInsts);
   RedDes = RD;
 
   return true;
@@ -554,16 +593,12 @@
 
 RecurrenceDescriptor::InstDesc
 RecurrenceDescriptor::isRecurrenceInstr(Instruction *I, RecurKind Kind,
-                                        InstDesc &Prev, bool HasFunNoNaNAttr) {
-  Instruction *UAI = Prev.getUnsafeAlgebraInst();
-  if (!UAI && isa<FPMathOperator>(I) && !I->hasAllowReassoc())
-    UAI = I; // Found an unsafe (unvectorizable) algebra instruction.
-
+                                        InstDesc &Prev, FastMathFlags FMF) {
   switch (I->getOpcode()) {
   default:
     return InstDesc(false, I);
   case Instruction::PHI:
-    return InstDesc(I, Prev.getRecKind(), Prev.getUnsafeAlgebraInst());
+    return InstDesc(I, Prev.getRecKind(), Prev.getExactFPMathInst());
   case Instruction::Sub:
   case Instruction::Add:
     return InstDesc(Kind == RecurKind::Add, I);
@@ -577,20 +612,22 @@
     return InstDesc(Kind == RecurKind::Xor, I);
   case Instruction::FDiv:
   case Instruction::FMul:
-    return InstDesc(Kind == RecurKind::FMul, I, UAI);
+    return InstDesc(Kind == RecurKind::FMul, I,
+                    I->hasAllowReassoc() ? nullptr : I);
   case Instruction::FSub:
   case Instruction::FAdd:
-    return InstDesc(Kind == RecurKind::FAdd, I, UAI);
+    return InstDesc(Kind == RecurKind::FAdd, I,
+                    I->hasAllowReassoc() ? nullptr : I);
   case Instruction::Select:
     if (Kind == RecurKind::FAdd || Kind == RecurKind::FMul)
       return isConditionalRdxPattern(Kind, I);
     LLVM_FALLTHROUGH;
   case Instruction::FCmp:
   case Instruction::ICmp:
-    if (!isIntMinMaxRecurrenceKind(Kind) &&
-        (!HasFunNoNaNAttr || !isFPMinMaxRecurrenceKind(Kind)))
-      return InstDesc(false, I);
-    return isMinMaxSelectCmpPattern(I, Prev);
+    if (isIntMinMaxRecurrenceKind(Kind) ||
+        (FMF.noNaNs() && FMF.noSignedZeros() && isFPMinMaxRecurrenceKind(Kind)))
+      return isMinMaxSelectCmpPattern(I, Prev);
+    return InstDesc(false, I);
   }
 }
 
@@ -598,9 +635,8 @@
     Instruction *I, SmallPtrSetImpl<Instruction *> &Insts,
     unsigned MaxNumUses) {
   unsigned NumUses = 0;
-  for (User::op_iterator Use = I->op_begin(), E = I->op_end(); Use != E;
-       ++Use) {
-    if (Insts.count(dyn_cast<Instruction>(*Use)))
+  for (const Use &U : I->operands()) {
+    if (Insts.count(dyn_cast<Instruction>(U)))
       ++NumUses;
     if (NumUses > MaxNumUses)
       return true;
@@ -608,6 +644,7 @@
 
   return false;
 }
+
 bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
                                           RecurrenceDescriptor &RedDes,
                                           DemandedBits *DB, AssumptionCache *AC,
@@ -615,71 +652,61 @@
 
   BasicBlock *Header = TheLoop->getHeader();
   Function &F = *Header->getParent();
-  bool HasFunNoNaNAttr =
-      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";
+  FastMathFlags FMF;
+  FMF.setNoNaNs(F.getFnAttribute("no-nans-fp-math").getValueAsBool());
+  FMF.setNoSignedZeros(
+      F.getFnAttribute("no-signed-zeros-fp-math").getValueAsBool());
 
-  if (AddReductionVar(Phi, RecurKind::Add, TheLoop, HasFunNoNaNAttr, RedDes, DB,
-                      AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::Add, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, HasFunNoNaNAttr, RedDes, DB,
-                      AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::Mul, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::Or, TheLoop, HasFunNoNaNAttr, RedDes, DB,
-                      AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::Or, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::And, TheLoop, HasFunNoNaNAttr, RedDes, DB,
-                      AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::And, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, HasFunNoNaNAttr, RedDes, DB,
-                      AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::Xor, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, HasFunNoNaNAttr, RedDes,
-                      DB, AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::SMax, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found a SMAX reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, HasFunNoNaNAttr, RedDes,
-                      DB, AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::SMin, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found a SMIN reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, HasFunNoNaNAttr, RedDes,
-                      DB, AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::UMax, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found a UMAX reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, HasFunNoNaNAttr, RedDes,
-                      DB, AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::UMin, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found a UMIN reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, HasFunNoNaNAttr, RedDes,
-                      DB, AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::FMul, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, HasFunNoNaNAttr, RedDes,
-                      DB, AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::FAdd, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, HasFunNoNaNAttr, RedDes,
-                      DB, AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::FMax, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found a float MAX reduction PHI." << *Phi << "\n");
     return true;
   }
-  if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, HasFunNoNaNAttr, RedDes,
-                      DB, AC, DT)) {
+  if (AddReductionVar(Phi, RecurKind::FMin, TheLoop, FMF, RedDes, DB, AC, DT)) {
     LLVM_DEBUG(dbgs() << "Found a float MIN reduction PHI." << *Phi << "\n");
     return true;
   }
@@ -689,7 +716,7 @@
 
 bool RecurrenceDescriptor::isFirstOrderRecurrence(
     PHINode *Phi, Loop *TheLoop,
-    DenseMap<Instruction *, Instruction *> &SinkAfter, DominatorTree *DT) {
+    MapVector<Instruction *, Instruction *> &SinkAfter, DominatorTree *DT) {
 
   // Ensure the phi node is in the loop header and has two incoming values.
   if (Phi->getParent() != TheLoop->getHeader() ||
@@ -715,56 +742,82 @@
       SinkAfter.count(Previous)) // Cannot rely on dominance due to motion.
     return false;
 
-  // Ensure every user of the phi node is dominated by the previous value.
-  // The dominance requirement ensures the loop vectorizer will not need to
-  // vectorize the initial value prior to the first iteration of the loop.
-  // TODO: Consider extending this sinking to handle memory instructions and
-  // phis with multiple users.
+  // Ensure every user of the phi node (recursively) is dominated by the
+  // previous value. The dominance requirement ensures the loop vectorizer will
+  // not need to vectorize the initial value prior to the first iteration of the
+  // loop.
+  // TODO: Consider extending this sinking to handle memory instructions.
 
-  // Returns true, if all users of I are dominated by DominatedBy.
-  auto allUsesDominatedBy = [DT](Instruction *I, Instruction *DominatedBy) {
-    return all_of(I->uses(), [DT, DominatedBy](Use &U) {
-      return DT->dominates(DominatedBy, U);
-    });
+  // We optimistically assume we can sink all users after Previous. Keep a set
+  // of instructions to sink after Previous ordered by dominance in the common
+  // basic block. It will be applied to SinkAfter if all users can be sunk.
+  auto CompareByComesBefore = [](const Instruction *A, const Instruction *B) {
+    return A->comesBefore(B);
   };
+  std::set<Instruction *, decltype(CompareByComesBefore)> InstrsToSink(
+      CompareByComesBefore);
 
-  if (Phi->hasOneUse()) {
-    Instruction *I = Phi->user_back();
+  BasicBlock *PhiBB = Phi->getParent();
+  SmallVector<Instruction *, 8> WorkList;
+  auto TryToPushSinkCandidate = [&](Instruction *SinkCandidate) {
+    // Already sunk SinkCandidate.
+    if (SinkCandidate->getParent() == PhiBB &&
+        InstrsToSink.find(SinkCandidate) != InstrsToSink.end())
+      return true;
 
-    // If the user of the PHI is also the incoming value, we potentially have a
-    // reduction and which cannot be handled by sinking.
-    if (Previous == I)
+    // Cyclic dependence.
+    if (Previous == SinkCandidate)
       return false;
 
-    // We cannot sink terminator instructions.
-    if (I->getParent()->getTerminator() == I)
+    if (DT->dominates(Previous,
+                      SinkCandidate)) // We already are good w/o sinking.
+      return true;
+
+    if (SinkCandidate->getParent() != PhiBB ||
+        SinkCandidate->mayHaveSideEffects() ||
+        SinkCandidate->mayReadFromMemory() || SinkCandidate->isTerminator())
       return false;
 
     // Do not try to sink an instruction multiple times (if multiple operands
     // are first order recurrences).
     // TODO: We can support this case, by sinking the instruction after the
     // 'deepest' previous instruction.
-    if (SinkAfter.find(I) != SinkAfter.end())
+    if (SinkAfter.find(SinkCandidate) != SinkAfter.end())
       return false;
 
-    if (DT->dominates(Previous, I)) // We already are good w/o sinking.
+    // If we reach a PHI node that is not dominated by Previous, we reached a
+    // header PHI. No need for sinking.
+    if (isa<PHINode>(SinkCandidate))
       return true;
 
-    // We can sink any instruction without side effects, as long as all users
-    // are dominated by the instruction we are sinking after.
-    if (I->getParent() == Phi->getParent() && !I->mayHaveSideEffects() &&
-        allUsesDominatedBy(I, Previous)) {
-      SinkAfter[I] = Previous;
-      return true;
+    // Tentatively sink SinkCandidate, then check its users.
+    InstrsToSink.insert(SinkCandidate);
+    WorkList.push_back(SinkCandidate);
+    return true;
+  };
+
+  WorkList.push_back(Phi);
+  // Try to recursively sink instructions and their users after Previous.
+  while (!WorkList.empty()) {
+    Instruction *Current = WorkList.pop_back_val();
+    for (User *User : Current->users()) {
+      if (!TryToPushSinkCandidate(cast<Instruction>(User)))
+        return false;
     }
   }
 
-  return allUsesDominatedBy(Phi, Previous);
+  // We can sink all users of Phi. Update the mapping.
+  for (Instruction *I : InstrsToSink) {
+    SinkAfter[I] = Previous;
+    Previous = I;
+  }
+  return true;
 }
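
For intuition, the sinking logic above targets first-order recurrences: scalars
that carry a value produced in the previous loop iteration, modeled by a header
PHI whose users may appear before the carried value's producer. A minimal C++
analogue of such a loop (an illustrative sketch; the names are mine, not from
the patch):

  #include <cstdio>

  int main() {
    int A[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int Prev = 0; // the PHI's start value coming from the preheader
    int Sum = 0;
    for (int I = 0; I < 8; ++I) {
      Sum += Prev + A[I]; // user of the recurrence; to vectorize, it must be
                          // evaluated after Prev's producer, hence "sinking"
      Prev = A[I];        // "Previous": produced here, consumed next iteration
    }
    std::printf("%d\n", Sum); // 64
    return 0;
  }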
 
 /// This function returns the identity element (or neutral element) for
 /// the operation K.
-Constant *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp) {
+Constant *RecurrenceDescriptor::getRecurrenceIdentity(RecurKind K, Type *Tp,
+                                                      FastMathFlags FMF) {
   switch (K) {
   case RecurKind::Xor:
   case RecurKind::Add:
@@ -782,7 +835,14 @@
     return ConstantFP::get(Tp, 1.0L);
   case RecurKind::FAdd:
     // Adding zero to a number does not change it.
-    return ConstantFP::get(Tp, 0.0L);
+    // FIXME: Ideally we should always use -0.0 here and not check FMF for
+    // FAdd; currently that would produce mixed vectors of 0.0/-0.0. Instead,
+    // we should ensure that 1) the FMF from FAdd are propagated to the PHI
+    // nodes where possible, and 2) PHIs with the nsz flag + -0.0 use 0.0,
+    // letting us drop the noSignedZeros() check below (see D98963).
+    if (FMF.noSignedZeros())
+      return ConstantFP::get(Tp, 0.0L);
+    return ConstantFP::get(Tp, -0.0L);
   case RecurKind::UMin:
     return ConstantInt::get(Tp, -1);
   case RecurKind::UMax:
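
The -0.0 default above follows IEEE-754: -0.0 + x == x for every x, whereas
+0.0 + (-0.0) rounds to +0.0, so +0.0 is only a safe FAdd identity when signed
zeros may be ignored (the nsz flag). A standalone check (illustration only, not
part of the patch):

  #include <cstdio>

  int main() {
    double NegZero = -0.0, PosZero = 0.0;
    std::printf("%g %g\n", NegZero + NegZero, NegZero + PosZero); // -0 0
    std::printf("%g\n", PosZero + NegZero); // 0, not -0, so +0.0 is not an
                                            // identity without nsz
    return 0;
  }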
diff --git a/src/llvm-project/llvm/lib/Analysis/IVUsers.cpp b/src/llvm-project/llvm/lib/Analysis/IVUsers.cpp
index 9432696..db6cff7 100644
--- a/src/llvm-project/llvm/lib/Analysis/IVUsers.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/IVUsers.cpp
@@ -101,12 +101,12 @@
     BasicBlock *DomBB = Rung->getBlock();
     Loop *DomLoop = LI->getLoopFor(DomBB);
     if (DomLoop && DomLoop->getHeader() == DomBB) {
-      // If the domtree walk reaches a loop with no preheader, return false.
-      if (!DomLoop->isLoopSimplifyForm())
-        return false;
       // If we have already checked this loop nest, stop checking.
       if (SimpleLoopNests.count(DomLoop))
         break;
+      // If the domtree walk reaches a loop with no preheader, return false.
+      if (!DomLoop->isLoopSimplifyForm())
+        return false;
       // If we have not already checked this loop nest, remember the loop
       // header nearest to BB. The nearest loop may not contain BB.
       if (!NearestLoop)
diff --git a/src/llvm-project/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp b/src/llvm-project/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp
index a7b5fda..48eeb87 100644
--- a/src/llvm-project/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ImportedFunctionsInliningStatistics.cpp
@@ -19,6 +19,8 @@
 #include <algorithm>
 #include <iomanip>
 #include <sstream>
+#include <string>
+
 using namespace llvm;
 
 cl::opt<InlinerFunctionImportStatsOpts> InlinerFunctionImportStats(
diff --git a/src/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp b/src/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp
index 9a2276a..a8ad2d6 100644
--- a/src/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/InlineAdvisor.cpp
@@ -24,8 +24,6 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/raw_ostream.h"
 
-#include <sstream>
-
 using namespace llvm;
 #define DEBUG_TYPE "inline"
 
@@ -159,6 +157,7 @@
   auto &FAM = MAM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
   switch (Mode) {
   case InliningAdvisorMode::Default:
+    LLVM_DEBUG(dbgs() << "Using default inliner heuristic.\n");
     Advisor.reset(new DefaultInlineAdvisor(M, FAM, Params));
     // Restrict replay to default advisor, ML advisors are stateful so
     // replay will need augmentations to interleave with them correctly.
@@ -170,6 +169,7 @@
     break;
   case InliningAdvisorMode::Development:
 #ifdef LLVM_HAVE_TF_API
+    LLVM_DEBUG(dbgs() << "Using development-mode inliner policy.\n");
     Advisor =
         llvm::getDevelopmentModeAdvisor(M, MAM, [&FAM, Params](CallBase &CB) {
           auto OIC = getDefaultInlineAdvice(CB, FAM, Params);
@@ -179,6 +179,7 @@
     break;
   case InliningAdvisorMode::Release:
 #ifdef LLVM_HAVE_TF_AOT
+    LLVM_DEBUG(dbgs() << "Using release-mode inliner policy.\n");
     Advisor = llvm::getReleaseModeAdvisor(M, MAM);
 #endif
     break;
@@ -279,8 +280,7 @@
 }
 
 namespace llvm {
-static std::basic_ostream<char> &operator<<(std::basic_ostream<char> &R,
-                                            const ore::NV &Arg) {
+static raw_ostream &operator<<(raw_ostream &R, const ore::NV &Arg) {
   return R << Arg.Val;
 }
 
@@ -302,7 +302,8 @@
 } // namespace llvm
 
 std::string llvm::inlineCostStr(const InlineCost &IC) {
-  std::stringstream Remark;
+  std::string Buffer;
+  raw_string_ostream Remark(Buffer);
   Remark << IC;
   return Remark.str();
 }
@@ -383,7 +384,8 @@
 }
 
 std::string llvm::getCallSiteLocation(DebugLoc DLoc) {
-  std::ostringstream CallSiteLoc;
+  std::string Buffer;
+  raw_string_ostream CallSiteLoc(Buffer);
   bool First = true;
   for (DILocation *DIL = DLoc.get(); DIL; DIL = DIL->getInlinedAt()) {
     if (!First)
diff --git a/src/llvm-project/llvm/lib/Analysis/InlineCost.cpp b/src/llvm-project/llvm/lib/Analysis/InlineCost.cpp
index a35f5e1..4c2413e 100644
--- a/src/llvm-project/llvm/lib/Analysis/InlineCost.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/InlineCost.cpp
@@ -113,6 +113,10 @@
              "entry frequency, for a callsite to be hot in the absence of "
              "profile information."));
 
+static cl::opt<int> CallPenalty(
+    "inline-call-penalty", cl::Hidden, cl::init(25),
+    cl::desc("Call penalty that is applied per callsite when inlining"));
+
 static cl::opt<bool> OptComputeFullInlineCost(
     "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
     cl::desc("Compute the full inline cost of a call site even when the cost "
@@ -390,7 +394,6 @@
   bool visitPtrToInt(PtrToIntInst &I);
   bool visitIntToPtr(IntToPtrInst &I);
   bool visitCastInst(CastInst &I);
-  bool visitUnaryInstruction(UnaryInstruction &I);
   bool visitCmpInst(CmpInst &I);
   bool visitSub(BinaryOperator &I);
   bool visitBinaryOperator(BinaryOperator &I);
@@ -411,19 +414,18 @@
   bool visitUnreachableInst(UnreachableInst &I);
 
 public:
-  CallAnalyzer(
-      Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
-      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
-      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
-      ProfileSummaryInfo *PSI = nullptr,
-      OptimizationRemarkEmitter *ORE = nullptr)
+  CallAnalyzer(Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
+               function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
+               function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
+               ProfileSummaryInfo *PSI = nullptr,
+               OptimizationRemarkEmitter *ORE = nullptr)
       : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
         PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
         CandidateCall(Call), EnableLoadElimination(true) {}
 
   InlineResult analyze();
 
-  Optional<Constant*> getSimplifiedValue(Instruction *I) {
+  Optional<Constant *> getSimplifiedValue(Instruction *I) {
     if (SimplifiedValues.find(I) != SimplifiedValues.end())
       return SimplifiedValues[I];
     return None;
@@ -441,6 +443,25 @@
   void dump();
 };
 
+// When a switch is lowered as a binary search tree, the number of nodes in
+// the tree equals the number of comparisons in the lowered code. For a given
+// number of clusters, n, we can define a recursive function, f(n), to find
+// the number of nodes in the tree. The recursion is:
+// f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
+// and f(n) = n, when n <= 3.
+// This leads to a binary tree where each leaf is either f(2) or f(3) when
+// n > 3. So, the number of comparisons from leaves is n, while the number
+// from non-leaf nodes is:
+//   2^(log2(n) - 1) - 1
+//   = 2^log2(n) * 2^-1 - 1
+//   = n / 2 - 1.
+// Combining the comparisons from leaf and non-leaf nodes, we can estimate
+// the total number of comparisons in a simple closed form:
+//   n + n / 2 - 1 = n * 3 / 2 - 1
+int64_t getExpectedNumberOfCompare(int NumCaseCluster) {
+  return 3 * static_cast<int64_t>(NumCaseCluster) / 2 - 1;
+}
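
A standalone sanity check of that estimate (illustration only, not part of the
patch): computing f(n) by direct recursion shows the closed form is exact when
the tree splits evenly into f(2)/f(3) leaves (e.g. n = 4, 5, 8, 16) and errs
slightly high otherwise (f(6) = 7 against an estimate of 8), which is
acceptable for a cost heuristic:

  #include <cstdio>

  static long f(long N) { return N <= 3 ? N : 1 + f(N / 2) + f(N - N / 2); }

  int main() {
    for (long N = 4; N <= 16; ++N)
      std::printf("n=%2ld f(n)=%2ld estimate=%2ld\n", N, f(N), 3 * N / 2 - 1);
    return 0;
  }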
+
 /// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
 /// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
 class InlineCostCallAnalyzer final : public CallAnalyzer {
@@ -489,6 +510,12 @@
   // sense that it's not weighted by profile counts at all.
   int ColdSize = 0;
 
+  // Whether inlining is decided by cost-benefit analysis.
+  bool DecidedByCostBenefit = false;
+
+  // The cost-benefit pair computed by cost-benefit analysis.
+  Optional<CostBenefitPair> CostBenefit = None;
+
   bool SingleBB = true;
 
   unsigned SROACostSavings = 0;
@@ -514,7 +541,7 @@
   /// Handle a capped 'int' increment for Cost.
   void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
     assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
-    Cost = (int)std::min(UpperBound, Cost + Inc);
+    Cost = std::min<int>(UpperBound, Cost + Inc);
   }
 
   void onDisableSROA(AllocaInst *Arg) override {
@@ -531,7 +558,7 @@
     addCost(LoadEliminationCost);
     LoadEliminationCost = 0;
   }
-  void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
+  void onCallPenalty() override { addCost(CallPenalty); }
   void onCallArgumentSetup(const CallBase &Call) override {
     // Pay the price of the argument setup. We account for the average 1
     // instruction per call argument setup here.
@@ -566,7 +593,7 @@
       }
     } else
       // Otherwise simply add the cost for merely making the call.
-      addCost(InlineConstants::CallPenalty);
+      addCost(CallPenalty);
   }
 
   void onFinalizeSwitch(unsigned JumpTableSize,
@@ -575,38 +602,26 @@
     // branch to destination.
     // Maximum valid cost increased in this function.
     if (JumpTableSize) {
-      int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
-                       4 * InlineConstants::InstrCost;
+      int64_t JTCost =
+          static_cast<int64_t>(JumpTableSize) * InlineConstants::InstrCost +
+          4 * InlineConstants::InstrCost;
 
-      addCost(JTCost, (int64_t)CostUpperBound);
+      addCost(JTCost, static_cast<int64_t>(CostUpperBound));
       return;
     }
-    // Considering forming a binary search, we should find the number of nodes
-    // which is same as the number of comparisons when lowered. For a given
-    // number of clusters, n, we can define a recursive function, f(n), to find
-    // the number of nodes in the tree. The recursion is :
-    // f(n) = 1 + f(n/2) + f (n - n/2), when n > 3,
-    // and f(n) = n, when n <= 3.
-    // This will lead a binary tree where the leaf should be either f(2) or f(3)
-    // when n > 3.  So, the number of comparisons from leaves should be n, while
-    // the number of non-leaf should be :
-    //   2^(log2(n) - 1) - 1
-    //   = 2^log2(n) * 2^-1 - 1
-    //   = n / 2 - 1.
-    // Considering comparisons from leaf and non-leaf nodes, we can estimate the
-    // number of comparisons in a simple closed form :
-    //   n + n / 2 - 1 = n * 3 / 2 - 1
+
     if (NumCaseCluster <= 3) {
       // Suppose a comparison includes one compare and one conditional branch.
       addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
       return;
     }
 
-    int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
+    int64_t ExpectedNumberOfCompare =
+        getExpectedNumberOfCompare(NumCaseCluster);
     int64_t SwitchCost =
         ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
 
-    addCost(SwitchCost, (int64_t)CostUpperBound);
+    addCost(SwitchCost, static_cast<int64_t>(CostUpperBound));
   }
   void onMissedSimplification() override {
     addCost(InlineConstants::InstrCost);
@@ -672,15 +687,22 @@
   }
 
   bool isCostBenefitAnalysisEnabled() {
-    if (!InlineEnableCostBenefitAnalysis)
-      return false;
-
     if (!PSI || !PSI->hasProfileSummary())
       return false;
 
     if (!GetBFI)
       return false;
 
+    if (InlineEnableCostBenefitAnalysis.getNumOccurrences()) {
+      // Honor the explicit request from the user.
+      if (!InlineEnableCostBenefitAnalysis)
+        return false;
+    } else {
+      // Otherwise, require an instrumentation profile.
+      if (!PSI->hasInstrumentationProfile())
+        return false;
+    }
+
     auto *Caller = CandidateCall.getParent()->getParent();
     if (!Caller->getEntryCount())
       return false;
@@ -693,7 +715,9 @@
     if (!PSI->isHotCallSite(CandidateCall, CallerBFI))
       return false;
 
-    if (!F.getEntryCount())
+    // Make sure we have a nonzero entry count.
+    auto EntryCount = F.getEntryCount();
+    if (!EntryCount || !EntryCount.getCount())
       return false;
 
     BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
@@ -749,9 +773,6 @@
             CurrentSavings += InlineConstants::InstrCost;
           }
         }
-        // TODO: Consider other forms of savings like switch statements,
-        // indirect calls becoming direct, SROACostSavings, LoadEliminationCost,
-        // etc.
       }
 
       auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
@@ -762,7 +783,7 @@
 
     // Compute the cycle savings per call.
     auto EntryProfileCount = F.getEntryCount();
-    assert(EntryProfileCount.hasValue());
+    assert(EntryProfileCount.hasValue() && EntryProfileCount.getCount());
     auto EntryCount = EntryProfileCount.getCount();
     CycleSavings += EntryCount / 2;
     CycleSavings = CycleSavings.udiv(EntryCount);
@@ -780,6 +801,8 @@
     // savings threshold.
     Size = Size > InlineSizeAllowance ? Size - InlineSizeAllowance : 1;
 
+    CostBenefit.emplace(APInt(128, Size), CycleSavings);
+
     // Return true if the savings justify the cost of inlining.  Specifically,
     // we evaluate the following inequality:
     //
@@ -813,7 +836,7 @@
           continue;
         NumLoops++;
       }
-      addCost(NumLoops * InlineConstants::CallPenalty);
+      addCost(NumLoops * InlineConstants::LoopPenalty);
     }
 
     // We applied the maximum possible vector bonus at the beginning. Now,
@@ -825,6 +848,7 @@
       Threshold -= VectorBonus / 2;
 
     if (auto Result = costBenefitAnalysis()) {
+      DecidedByCostBenefit = true;
       if (Result.getValue())
         return InlineResult::success();
       else
@@ -924,9 +948,213 @@
   }
 
   virtual ~InlineCostCallAnalyzer() {}
-  int getThreshold() { return Threshold; }
-  int getCost() { return Cost; }
+  int getThreshold() const { return Threshold; }
+  int getCost() const { return Cost; }
+  Optional<CostBenefitPair> getCostBenefitPair() { return CostBenefit; }
+  bool wasDecidedByCostBenefit() const { return DecidedByCostBenefit; }
 };
+
+class InlineCostFeaturesAnalyzer final : public CallAnalyzer {
+private:
+  InlineCostFeatures Cost = {};
+
+  // FIXME: These constants are taken from the heuristic-based cost visitor.
+  // These should be removed entirely in a later revision to avoid reliance on
+  // heuristics in the ML inliner.
+  static constexpr int JTCostMultiplier = 4;
+  static constexpr int CaseClusterCostMultiplier = 2;
+  static constexpr int SwitchCostMultiplier = 2;
+
+  // FIXME: These are taken from the heuristic-based cost visitor: we should
+  // eventually abstract these to the CallAnalyzer to avoid duplication.
+  unsigned SROACostSavingOpportunities = 0;
+  int VectorBonus = 0;
+  int SingleBBBonus = 0;
+  int Threshold = 5;
+
+  DenseMap<AllocaInst *, unsigned> SROACosts;
+
+  void increment(InlineCostFeatureIndex Feature, int64_t Delta = 1) {
+    Cost[static_cast<size_t>(Feature)] += Delta;
+  }
+
+  void set(InlineCostFeatureIndex Feature, int64_t Value) {
+    Cost[static_cast<size_t>(Feature)] = Value;
+  }
+
+  void onDisableSROA(AllocaInst *Arg) override {
+    auto CostIt = SROACosts.find(Arg);
+    if (CostIt == SROACosts.end())
+      return;
+
+    increment(InlineCostFeatureIndex::SROALosses, CostIt->second);
+    SROACostSavingOpportunities -= CostIt->second;
+    SROACosts.erase(CostIt);
+  }
+
+  void onDisableLoadElimination() override {
+    set(InlineCostFeatureIndex::LoadElimination, 1);
+  }
+
+  void onCallPenalty() override {
+    increment(InlineCostFeatureIndex::CallPenalty, CallPenalty);
+  }
+
+  void onCallArgumentSetup(const CallBase &Call) override {
+    increment(InlineCostFeatureIndex::CallArgumentSetup,
+              Call.arg_size() * InlineConstants::InstrCost);
+  }
+
+  void onLoadRelativeIntrinsic() override {
+    increment(InlineCostFeatureIndex::LoadRelativeIntrinsic,
+              3 * InlineConstants::InstrCost);
+  }
+
+  void onLoweredCall(Function *F, CallBase &Call,
+                     bool IsIndirectCall) override {
+    increment(InlineCostFeatureIndex::LoweredCallArgSetup,
+              Call.arg_size() * InlineConstants::InstrCost);
+
+    if (IsIndirectCall) {
+      InlineParams IndirectCallParams = {/* DefaultThreshold*/ 0,
+                                         /*HintThreshold*/ {},
+                                         /*ColdThreshold*/ {},
+                                         /*OptSizeThreshold*/ {},
+                                         /*OptMinSizeThreshold*/ {},
+                                         /*HotCallSiteThreshold*/ {},
+                                         /*LocallyHotCallSiteThreshold*/ {},
+                                         /*ColdCallSiteThreshold*/ {},
+                                         /*ComputeFullInlineCost*/ true,
+                                         /*EnableDeferral*/ true};
+      IndirectCallParams.DefaultThreshold =
+          InlineConstants::IndirectCallThreshold;
+
+      InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
+                                GetAssumptionCache, GetBFI, PSI, ORE, false,
+                                true);
+      if (CA.analyze().isSuccess()) {
+        increment(InlineCostFeatureIndex::NestedInlineCostEstimate,
+                  CA.getCost());
+        increment(InlineCostFeatureIndex::NestedInlines, 1);
+      }
+    } else {
+      onCallPenalty();
+    }
+  }
+
+  void onFinalizeSwitch(unsigned JumpTableSize,
+                        unsigned NumCaseCluster) override {
+
+    if (JumpTableSize) {
+      int64_t JTCost =
+          static_cast<int64_t>(JumpTableSize) * InlineConstants::InstrCost +
+          JTCostMultiplier * InlineConstants::InstrCost;
+      increment(InlineCostFeatureIndex::JumpTablePenalty, JTCost);
+      return;
+    }
+
+    if (NumCaseCluster <= 3) {
+      increment(InlineCostFeatureIndex::CaseClusterPenalty,
+                NumCaseCluster * CaseClusterCostMultiplier *
+                    InlineConstants::InstrCost);
+      return;
+    }
+
+    int64_t ExpectedNumberOfCompare =
+        getExpectedNumberOfCompare(NumCaseCluster);
+
+    int64_t SwitchCost = ExpectedNumberOfCompare * SwitchCostMultiplier *
+                         InlineConstants::InstrCost;
+    increment(InlineCostFeatureIndex::SwitchPenalty, SwitchCost);
+  }
+
+  void onMissedSimplification() override {
+    increment(InlineCostFeatureIndex::UnsimplifiedCommonInstructions,
+              InlineConstants::InstrCost);
+  }
+
+  void onInitializeSROAArg(AllocaInst *Arg) override { SROACosts[Arg] = 0; }
+  void onAggregateSROAUse(AllocaInst *Arg) override {
+    SROACosts.find(Arg)->second += InlineConstants::InstrCost;
+    SROACostSavingOpportunities += InlineConstants::InstrCost;
+  }
+
+  void onBlockAnalyzed(const BasicBlock *BB) override {
+    if (BB->getTerminator()->getNumSuccessors() > 1)
+      set(InlineCostFeatureIndex::IsMultipleBlocks, 1);
+    Threshold -= SingleBBBonus;
+  }
+
+  InlineResult finalizeAnalysis() override {
+    auto *Caller = CandidateCall.getFunction();
+    if (Caller->hasMinSize()) {
+      DominatorTree DT(F);
+      LoopInfo LI(DT);
+      for (Loop *L : LI) {
+        // Ignore loops that will not be executed
+        if (DeadBlocks.count(L->getHeader()))
+          continue;
+        increment(InlineCostFeatureIndex::NumLoops,
+                  InlineConstants::LoopPenalty);
+      }
+    }
+    set(InlineCostFeatureIndex::DeadBlocks, DeadBlocks.size());
+    set(InlineCostFeatureIndex::SimplifiedInstructions,
+        NumInstructionsSimplified);
+    set(InlineCostFeatureIndex::ConstantArgs, NumConstantArgs);
+    set(InlineCostFeatureIndex::ConstantOffsetPtrArgs,
+        NumConstantOffsetPtrArgs);
+    set(InlineCostFeatureIndex::SROASavings, SROACostSavingOpportunities);
+
+    if (NumVectorInstructions <= NumInstructions / 10)
+      Threshold -= VectorBonus;
+    else if (NumVectorInstructions <= NumInstructions / 2)
+      Threshold -= VectorBonus / 2;
+
+    set(InlineCostFeatureIndex::Threshold, Threshold);
+
+    return InlineResult::success();
+  }
+
+  bool shouldStop() override { return false; }
+
+  void onLoadEliminationOpportunity() override {
+    increment(InlineCostFeatureIndex::LoadElimination, 1);
+  }
+
+  InlineResult onAnalysisStart() override {
+    increment(InlineCostFeatureIndex::CallSiteCost,
+              -1 * getCallsiteCost(this->CandidateCall, DL));
+
+    set(InlineCostFeatureIndex::ColdCcPenalty,
+        (F.getCallingConv() == CallingConv::Cold));
+
+    // FIXME: we shouldn't repeat this logic in both the Features and Cost
+    // analyzers; instead, we should abstract it to a common method in the
+    // CallAnalyzer.
+    int SingleBBBonusPercent = 50;
+    int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
+    Threshold += TTI.adjustInliningThreshold(&CandidateCall);
+    Threshold *= TTI.getInliningThresholdMultiplier();
+    SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
+    VectorBonus = Threshold * VectorBonusPercent / 100;
+    Threshold += (SingleBBBonus + VectorBonus);
+
+    return InlineResult::success();
+  }
+
+public:
+  InlineCostFeaturesAnalyzer(
+      const TargetTransformInfo &TTI,
+      function_ref<AssumptionCache &(Function &)> &GetAssumptionCache,
+      function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
+      ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE, Function &Callee,
+      CallBase &Call)
+      : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI) {}
+
+  const InlineCostFeatures &features() const { return Cost; }
+};
+
 } // namespace
 
 /// Test whether the given value is an Alloca-derived function argument.
@@ -940,8 +1168,8 @@
   disableLoadElimination();
 }
 
-void InlineCostAnnotationWriter::emitInstructionAnnot(const Instruction *I,
-                                                formatted_raw_ostream &OS) {
+void InlineCostAnnotationWriter::emitInstructionAnnot(
+    const Instruction *I, formatted_raw_ostream &OS) {
   // The cost of inlining of the given instruction is printed always.
   // The threshold delta is printed only when it is non-zero. It happens
   // when we decided to give a bonus at a particular instruction.
@@ -1023,12 +1251,14 @@
       Operands.push_back(SimpleOp);
     else
       Operands.push_back(Op);
-  return TargetTransformInfo::TCC_Free ==
-         TTI.getUserCost(&GEP, Operands,
-                         TargetTransformInfo::TCK_SizeAndLatency);
+  return TTI.getUserCost(&GEP, Operands,
+                         TargetTransformInfo::TCK_SizeAndLatency) ==
+         TargetTransformInfo::TCC_Free;
 }
 
 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
+  disableSROA(I.getOperand(0));
+
   // Check whether inlining will turn a dynamic alloca into a static
   // alloca and handle that case.
   if (I.isArrayAllocation()) {
@@ -1044,13 +1274,11 @@
       // is needed to track stack usage during inlining.
       Type *Ty = I.getAllocatedType();
       AllocatedSize = SaturatingMultiplyAdd(
-          AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty).getKnownMinSize(),
-          AllocatedSize);
-      if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline) {
+          AllocSize->getLimitedValue(),
+          DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
+      if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline)
         HasDynamicAlloca = true;
-        return false;
-      }
-      return Base::visitAlloca(I);
+      return false;
     }
   }
 
@@ -1061,15 +1289,13 @@
         SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
   }
 
-  // We will happily inline static alloca instructions.
-  if (I.isStaticAlloca())
-    return Base::visitAlloca(I);
-
   // FIXME: This is overly conservative. Dynamic allocas are inefficient for
   // a variety of reasons, and so we would like to not inline them into
   // functions which don't currently have a dynamic alloca. This simply
   // disables inlining altogether in the presence of a dynamic alloca.
-  HasDynamicAlloca = true;
+  if (!I.isStaticAlloca())
+    HasDynamicAlloca = true;
+
   return false;
 }
 
@@ -1202,11 +1428,11 @@
 
   if (!DisableGEPConstOperand)
     if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
-        SmallVector<Constant *, 2> Indices;
-        for (unsigned int Index = 1 ; Index < COps.size() ; ++Index)
+          SmallVector<Constant *, 2> Indices;
+          for (unsigned int Index = 1; Index < COps.size(); ++Index)
             Indices.push_back(COps[Index]);
-        return ConstantExpr::getGetElementPtr(I.getSourceElementType(), COps[0],
-                                              Indices, I.isInBounds());
+          return ConstantExpr::getGetElementPtr(
+              I.getSourceElementType(), COps[0], Indices, I.isInBounds());
         }))
       return true;
 
@@ -1295,8 +1521,8 @@
   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
     SROAArgValues[&I] = SROAArg;
 
-  return TargetTransformInfo::TCC_Free ==
-         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
+  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
+         TargetTransformInfo::TCC_Free;
 }
 
 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
@@ -1320,8 +1546,8 @@
   if (auto *SROAArg = getSROAArgForValueOrNull(Op))
     SROAArgValues[&I] = SROAArg;
 
-  return TargetTransformInfo::TCC_Free ==
-         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
+  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
+         TargetTransformInfo::TCC_Free;
 }
 
 bool CallAnalyzer::visitCastInst(CastInst &I) {
@@ -1352,21 +1578,8 @@
     break;
   }
 
-  return TargetTransformInfo::TCC_Free ==
-         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
-}
-
-bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
-  Value *Operand = I.getOperand(0);
-  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
-        return ConstantFoldInstOperands(&I, COps[0], DL);
-      }))
-    return true;
-
-  // Disable any SROA on the argument to arbitrary unary instructions.
-  disableSROA(Operand);
-
-  return false;
+  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
+         TargetTransformInfo::TCC_Free;
 }
 
 bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
@@ -1577,10 +1790,11 @@
     }
   }
 
+  Threshold += TTI.adjustInliningThreshold(&Call);
+
   // Finally, take the target-specific inlining threshold multiplier into
   // account.
   Threshold *= TTI.getInliningThresholdMultiplier();
-  Threshold += TTI.adjustInliningThreshold(&Call);
 
   SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
   VectorBonus = Threshold * VectorBonusPercent / 100;
@@ -1763,8 +1977,8 @@
       }))
     return true;
 
-  // SROA can look through these but give them a cost.
-  return false;
+  // SROA can't look through these, but they may be free.
+  return Base::visitExtractValue(I);
 }
 
 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
@@ -1776,8 +1990,8 @@
       }))
     return true;
 
-  // SROA can look through these but give them a cost.
-  return false;
+  // SROA can't look through these, but they may be free.
+  return Base::visitInsertValue(I);
 }
 
 /// Try to simplify a call site.
@@ -1872,6 +2086,11 @@
     case Intrinsic::vastart:
       InitsVargArgs = true;
       return false;
+    case Intrinsic::launder_invariant_group:
+    case Intrinsic::strip_invariant_group:
+      if (auto *SROAArg = getSROAArgForValueOrNull(II->getOperand(0)))
+        SROAArgValues[II] = SROAArg;
+      return true;
     }
   }
 
@@ -1948,9 +2167,9 @@
   }
 
   // Select condition is a constant.
-  Value *SelectedV = CondC->isAllOnesValue()
-                         ? TrueVal
-                         : (CondC->isNullValue()) ? FalseVal : nullptr;
+  Value *SelectedV = CondC->isAllOnesValue()  ? TrueVal
+                     : (CondC->isNullValue()) ? FalseVal
+                                              : nullptr;
   if (!SelectedV) {
     // Condition is a vector constant that is not all 1s or all 0s.  If all
     // operands are constants, ConstantExpr::getSelect() can handle the cases
@@ -2002,7 +2221,7 @@
   // proportional to the size of the tree or the size of jump table range.
   //
   // NB: We convert large switches which are just used to initialize large phi
-  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
+  // nodes to lookup tables instead in simplifycfg, so this shouldn't prevent
   // inlining those. It will prevent inlining in cases where the optimization
   // does not (yet) fire.
 
@@ -2056,8 +2275,8 @@
 bool CallAnalyzer::visitInstruction(Instruction &I) {
   // Some instructions are free. All of the free intrinsics can also be
   // handled by SROA, etc.
-  if (TargetTransformInfo::TCC_Free ==
-      TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency))
+  if (TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
+      TargetTransformInfo::TCC_Free)
     return true;
 
   // We found something we don't understand or can't handle. Mark any SROA-able
@@ -2415,9 +2634,7 @@
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
 /// Dump stats about this call's analysis.
-LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
-  print();
-}
+LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() { print(); }
 #endif
 
 /// Test that there are no attribute conflicts between Caller and Callee
@@ -2443,7 +2660,7 @@
       // We approximate the number of loads and stores needed by dividing the
       // size of the byval type by the target's pointer size.
       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
-      unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
+      unsigned TypeSize = DL.getTypeSizeInBits(Call.getParamByValType(I));
       unsigned AS = PTy->getAddressSpace();
       unsigned PointerSize = DL.getPointerSizeInBits(AS);
       // Ceiling division.
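
The approximation above is a ceiling division of the byval type size by the
pointer width: for example, a 200-bit struct copied through 64-bit pointers
needs ceil(200/64) = 4 load/store pairs. The usual integer idiom (standalone
sketch, not from the patch):

  #include <cstdio>

  int main() {
    unsigned TypeSize = 200, PointerSize = 64;
    unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
    std::printf("%u\n", NumStores); // 4
    return 0;
  }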
@@ -2465,7 +2682,7 @@
     }
   }
   // The call instruction also disappears after inlining.
-  Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
+  Cost += InlineConstants::InstrCost + CallPenalty;
   return Cost;
 }
 
@@ -2504,6 +2721,19 @@
   return CA.getCost();
 }
 
+Optional<InlineCostFeatures> llvm::getInliningCostFeatures(
+    CallBase &Call, TargetTransformInfo &CalleeTTI,
+    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
+    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
+    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
+  InlineCostFeaturesAnalyzer CFA(CalleeTTI, GetAssumptionCache, GetBFI, PSI,
+                                 ORE, *Call.getCalledFunction(), Call);
+  auto R = CFA.analyze();
+  if (!R.isSuccess())
+    return None;
+  return CFA.features();
+}
+
 Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
     CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
     function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
@@ -2608,6 +2838,17 @@
 
   LLVM_DEBUG(CA.dump());
 
+  // Always make a cost-benefit-based decision explicit.
+  // We use always/never here since the threshold is not meaningful,
+  // as it is not what drives the cost-benefit analysis.
+  if (CA.wasDecidedByCostBenefit()) {
+    if (ShouldInline.isSuccess())
+      return InlineCost::getAlways("benefit over cost",
+                                   CA.getCostBenefitPair());
+    else
+      return InlineCost::getNever("cost over benefit", CA.getCostBenefitPair());
+  }
+
   // Check if there was a reason to force inlining or no inlining.
   if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
     return InlineCost::getNever(ShouldInline.getFailureReason());
@@ -2761,8 +3002,8 @@
 InlineCostAnnotationPrinterPass::run(Function &F,
                                      FunctionAnalysisManager &FAM) {
   PrintInstructionComments = true;
-  std::function<AssumptionCache &(Function &)> GetAssumptionCache = [&](
-      Function &F) -> AssumptionCache & {
+  std::function<AssumptionCache &(Function &)> GetAssumptionCache =
+      [&](Function &F) -> AssumptionCache & {
     return FAM.getResult<AssumptionAnalysis>(F);
   };
   Module *M = F.getParent();
diff --git a/src/llvm-project/llvm/lib/Analysis/InstructionPrecedenceTracking.cpp b/src/llvm-project/llvm/lib/Analysis/InstructionPrecedenceTracking.cpp
index c26cdf2..7d1e630 100644
--- a/src/llvm-project/llvm/lib/Analysis/InstructionPrecedenceTracking.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/InstructionPrecedenceTracking.cpp
@@ -111,6 +111,13 @@
     FirstSpecialInsts.erase(Inst->getParent());
 }
 
+void InstructionPrecedenceTracking::removeUsersOf(const Instruction *Inst) {
+  for (const auto *U : Inst->users()) {
+    if (const auto *UI = dyn_cast<Instruction>(U))
+      removeInstruction(UI);
+  }
+}
+
 void InstructionPrecedenceTracking::clear() {
   FirstSpecialInsts.clear();
 #ifndef NDEBUG
diff --git a/src/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp b/src/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
index a128168..23083bc 100644
--- a/src/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -17,7 +17,10 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/InstructionSimplify.h"
+
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumptionCache.h"
@@ -26,6 +29,7 @@
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/LoopAnalysisManager.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/OverflowInstAnalysis.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Analysis/VectorUtils.h"
 #include "llvm/IR/ConstantRange.h"
@@ -68,6 +72,8 @@
                                const SimplifyQuery &, unsigned);
 static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
                               unsigned);
+static Value *SimplifySelectInst(Value *, Value *, Value *,
+                                 const SimplifyQuery &, unsigned);
 
 static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
                                      Value *FalseVal) {
@@ -185,12 +191,15 @@
   // If the false value simplified to false, then the result of the compare
   // is equal to "Cond && TCmp".  This also catches the case when the false
   // value simplified to false and the true value to true, returning "Cond".
-  if (match(FCmp, m_Zero()))
+  // Folding select to and/or isn't poison-safe in general; impliesPoison
+  // checks that the fold does not turn a well-defined value into
+  // poison.
+  if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
     if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
       return V;
   // If the true value simplified to true, then the result of the compare
   // is equal to "Cond || FCmp".
-  if (match(TCmp, m_One()))
+  if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
     if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
       return V;
   // Finally, if the false value simplified to true and the true value to
@@ -221,8 +230,8 @@
 
   // Otherwise, if the instruction is in the entry block and is not an invoke,
   // then it obviously dominates all phi nodes.
-  if (I->getParent() == &I->getFunction()->getEntryBlock() &&
-      !isa<InvokeInst>(I) && !isa<CallBrInst>(I))
+  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
+      !isa<CallBrInst>(I))
     return true;
 
   return false;
@@ -730,6 +739,11 @@
   if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
     return C;
 
+  // X - poison -> poison
+  // poison - X -> poison
+  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
+    return PoisonValue::get(Op0->getType());
+
   // X - undef -> undef
   // undef - X -> undef
   if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
@@ -865,6 +879,10 @@
   if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
     return C;
 
+  // X * poison -> poison
+  if (isa<PoisonValue>(Op1))
+    return Op1;
+
   // X * undef -> 0
   // X * 0 -> 0
   if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
@@ -920,8 +938,11 @@
 
 /// Check for common or similar folds of integer division or integer remainder.
 /// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
-static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv,
-                             const SimplifyQuery &Q) {
+static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
+                             Value *Op1, const SimplifyQuery &Q) {
+  bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
+  bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
+
   Type *Ty = Op0->getType();
 
   // X / undef -> poison
@@ -948,6 +969,11 @@
     }
   }
 
+  // poison / X -> poison
+  // poison % X -> poison
+  if (isa<PoisonValue>(Op0))
+    return Op0;
+
   // undef / X -> 0
   // undef % X -> 0
   if (Q.isUndefValue(Op0))
@@ -973,6 +999,21 @@
       (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
     return IsDiv ? Op0 : Constant::getNullValue(Ty);
 
+  // If X * Y does not overflow, then:
+  //   X * Y / Y -> X
+  //   X * Y % Y -> 0
+  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
+    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
+    // The multiplication can't overflow if it is defined not to, or if
+    // X == A / Y for some A.
+    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
+        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
+        (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
+        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
+      return IsDiv ? X : Constant::getNullValue(Op0->getType());
+    }
+  }
+
   return nullptr;
 }
 
@@ -1044,25 +1085,11 @@
   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
     return C;
 
-  if (Value *V = simplifyDivRem(Op0, Op1, true, Q))
+  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q))
     return V;
 
   bool IsSigned = Opcode == Instruction::SDiv;
 
-  // (X * Y) / Y -> X if the multiplication does not overflow.
-  Value *X;
-  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
-    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
-    // If the Mul does not overflow, then we are good to go.
-    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
-        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)))
-      return X;
-    // If X has the form X = A / Y, then X * Y cannot overflow.
-    if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
-        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1)))))
-      return X;
-  }
-
   // (X rem Y) / Y -> 0
   if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
       (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
@@ -1070,7 +1097,7 @@
 
   // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
   ConstantInt *C1, *C2;
-  if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
+  if (!IsSigned && match(Op0, m_UDiv(m_Value(), m_ConstantInt(C1))) &&
       match(Op1, m_ConstantInt(C2))) {
     bool Overflow;
     (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
@@ -1102,7 +1129,7 @@
   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
     return C;
 
-  if (Value *V = simplifyDivRem(Op0, Op1, false, Q))
+  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q))
     return V;
 
   // (X % Y) % Y -> X % Y
@@ -1209,8 +1236,7 @@
 
   // Shifting by the bitwidth or more is undefined.
   if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
-    if (CI->getValue().getLimitedValue() >=
-        CI->getType()->getScalarSizeInBits())
+    if (CI->getValue().uge(CI->getType()->getScalarSizeInBits()))
       return true;
 
   // If all lanes of a vector shift are undefined the whole shift is.
@@ -1229,10 +1255,15 @@
 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
 /// If not, this returns null.
 static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
-                            Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse) {
+                            Value *Op1, bool IsNSW, const SimplifyQuery &Q,
+                            unsigned MaxRecurse) {
   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
     return C;
 
+  // poison shift by X -> poison
+  if (isa<PoisonValue>(Op0))
+    return Op0;
+
   // 0 shift by X -> 0
   if (match(Op0, m_Zero()))
     return Constant::getNullValue(Op0->getType());
@@ -1263,16 +1294,31 @@
 
   // If any bits in the shift amount make that value greater than or equal to
   // the number of bits in the type, the shift is undefined.
-  KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
-  if (Known.One.getLimitedValue() >= Known.getBitWidth())
+  KnownBits KnownAmt = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+  if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
     return PoisonValue::get(Op0->getType());
 
   // If all valid bits in the shift amount are known zero, the first operand is
   // unchanged.
-  unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
-  if (Known.countMinTrailingZeros() >= NumValidShiftBits)
+  unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
+  if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
     return Op0;
 
+  // Check for nsw shl leading to a poison value.
+  if (IsNSW) {
+    assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
+    KnownBits KnownVal = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+    KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
+
+    if (KnownVal.Zero.isSignBitSet())
+      KnownShl.Zero.setSignBit();
+    if (KnownVal.One.isSignBitSet())
+      KnownShl.One.setSignBit();
+
+    if (KnownShl.hasConflict())
+      return PoisonValue::get(Op0->getType());
+  }
+
   return nullptr;
 }
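
The nsw check rejects shifts that provably flip the sign bit: shl nsw requires
the result to keep the operand's sign, so a known-bits conflict on the sign bit
means the result must be poison. A bit-level illustration at i8 width
(standalone sketch, not from the patch):

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint8_t Val = 0x20;                    // sign bit known zero (positive)
    uint8_t Shifted = (uint8_t)(Val << 2); // 0x80: the sign bit becomes one
    std::printf("sign before=%d after=%d\n", Val >> 7, Shifted >> 7); // 0 1
    return 0;
  }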
 
@@ -1281,7 +1327,8 @@
 static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                  Value *Op1, bool isExact, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
-  if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
+  if (Value *V =
+          SimplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
     return V;
 
   // X >> X -> 0
@@ -1307,7 +1354,8 @@
 /// If not, this returns null.
 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
-  if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
+  if (Value *V =
+          SimplifyShift(Instruction::Shl, Op0, Op1, isNSW, Q, MaxRecurse))
     return V;
 
   // undef << X -> 0
@@ -1928,77 +1976,6 @@
   return nullptr;
 }
 
-/// Check that the Op1 is in expected form, i.e.:
-///   %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
-///   %Op1 = extractvalue { i4, i1 } %Agg, 1
-static bool omitCheckForZeroBeforeMulWithOverflowInternal(Value *Op1,
-                                                          Value *X) {
-  auto *Extract = dyn_cast<ExtractValueInst>(Op1);
-  // We should only be extracting the overflow bit.
-  if (!Extract || !Extract->getIndices().equals(1))
-    return false;
-  Value *Agg = Extract->getAggregateOperand();
-  // This should be a multiplication-with-overflow intrinsic.
-  if (!match(Agg, m_CombineOr(m_Intrinsic<Intrinsic::umul_with_overflow>(),
-                              m_Intrinsic<Intrinsic::smul_with_overflow>())))
-    return false;
-  // One of its multipliers should be the value we checked for zero before.
-  if (!match(Agg, m_CombineOr(m_Argument<0>(m_Specific(X)),
-                              m_Argument<1>(m_Specific(X)))))
-    return false;
-  return true;
-}
-
-/// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
-/// other form of check, e.g. one that was using division; it may have been
-/// guarded against division-by-zero. We can drop that check now.
-/// Look for:
-///   %Op0 = icmp ne i4 %X, 0
-///   %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
-///   %Op1 = extractvalue { i4, i1 } %Agg, 1
-///   %??? = and i1 %Op0, %Op1
-/// We can just return  %Op1
-static Value *omitCheckForZeroBeforeMulWithOverflow(Value *Op0, Value *Op1) {
-  ICmpInst::Predicate Pred;
-  Value *X;
-  if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
-      Pred != ICmpInst::Predicate::ICMP_NE)
-    return nullptr;
-  // Is Op1 in expected form?
-  if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
-    return nullptr;
-  // Can omit 'and', and just return the overflow bit.
-  return Op1;
-}
-
-/// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
-/// other form of check, e.g. one that was using division; it may have been
-/// guarded against division-by-zero. We can drop that check now.
-/// Look for:
-///   %Op0 = icmp eq i4 %X, 0
-///   %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
-///   %Op1 = extractvalue { i4, i1 } %Agg, 1
-///   %NotOp1 = xor i1 %Op1, true
-///   %or = or i1 %Op0, %NotOp1
-/// We can just return  %NotOp1
-static Value *omitCheckForZeroBeforeInvertedMulWithOverflow(Value *Op0,
-                                                            Value *NotOp1) {
-  ICmpInst::Predicate Pred;
-  Value *X;
-  if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
-      Pred != ICmpInst::Predicate::ICMP_EQ)
-    return nullptr;
-  // We expect the other hand of an 'or' to be a 'not'.
-  Value *Op1;
-  if (!match(NotOp1, m_Not(m_Value(Op1))))
-    return nullptr;
-  // Is Op1 in expected form?
-  if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
-    return nullptr;
-  // Can omit 'and', and just return the inverted overflow bit.
-  return NotOp1;
-}
-
 /// Given a bitwise logic op, check if the operands are add/sub with a common
 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
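
The identity in that comment holds in two's complement because ~A == -A - 1:
~(X + ~C) == -(X + ~C) - 1 == -X - (-C - 1) - 1 == C - X. An exhaustive 8-bit
check (standalone sketch, not from the patch):

  #include <cstdint>
  #include <cstdio>

  int main() {
    for (unsigned C = 0; C < 256; ++C)
      for (unsigned X = 0; X < 256; ++X) {
        uint8_t Lhs = (uint8_t)(C - X);
        uint8_t Rhs = (uint8_t)~(uint8_t)(X + (uint8_t)~C);
        if (Lhs != Rhs)
          std::printf("mismatch C=%u X=%u\n", C, X);
      }
    std::printf("checked all 65536 pairs\n");
    return 0;
  }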
@@ -2030,6 +2007,10 @@
   if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
     return C;
 
+  // X & poison -> poison
+  if (isa<PoisonValue>(Op1))
+    return Op1;
+
   // X & undef -> 0
   if (Q.isUndefValue(Op1))
     return Constant::getNullValue(Op0->getType());
@@ -2083,10 +2064,10 @@
   // If we have a multiplication overflow check that is being 'and'ed with a
   // check that one of the multipliers is not zero, we can omit the 'and', and
   // only keep the overflow check.
-  if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op0, Op1))
-    return V;
-  if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op1, Op0))
-    return V;
+  if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
+    return Op1;
+  if (isCheckForZeroAndMulWithOverflow(Op1, Op0, true))
+    return Op0;
 
   // A & (-A) = A if A is a power of two or zero.
   if (match(Op0, m_Neg(m_Specific(Op1))) ||
@@ -2198,6 +2179,10 @@
   if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
     return C;
 
+  // X | poison -> poison
+  if (isa<PoisonValue>(Op1))
+    return Op1;
+
   // X | undef -> -1
   // X | -1 = -1
   // Do not return Op1 because it may contain undef elements if it's a vector.
@@ -2297,10 +2282,10 @@
   // If we have a multiplication overflow check that is being 'and'ed with a
   // check that one of the multipliers is not zero, we can omit the 'and', and
   // only keep the overflow check.
-  if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op0, Op1))
-    return V;
-  if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op1, Op0))
-    return V;
+  if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
+    return Op1;
+  if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
+    return Op0;
 
   // Try some generic simplifications for associative operations.
   if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
@@ -2469,10 +2454,14 @@
 // area, it may be possible to update LLVM's semantics accordingly and reinstate
 // this optimization.
 static Constant *
-computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
-                   const DominatorTree *DT, CmpInst::Predicate Pred,
-                   AssumptionCache *AC, const Instruction *CxtI,
-                   const InstrInfoQuery &IIQ, Value *LHS, Value *RHS) {
+computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
+                   const SimplifyQuery &Q) {
+  const DataLayout &DL = Q.DL;
+  const TargetLibraryInfo *TLI = Q.TLI;
+  const DominatorTree *DT = Q.DT;
+  const Instruction *CxtI = Q.CxtI;
+  const InstrInfoQuery &IIQ = Q.IIQ;
+
   // First, skip past any trivial no-ops.
   LHS = LHS->stripPointerCasts();
   RHS = RHS->stripPointerCasts();
@@ -3395,6 +3384,10 @@
 
   Type *ITy = GetCompareTy(LHS); // The return type.
 
+  // icmp poison, X -> poison
+  if (isa<PoisonValue>(RHS))
+    return PoisonValue::get(ITy);
+
   // For EQ and NE, we can always pick a value for the undef to make the
   // predicate pass or fail, so we can return undef.
   // Matches behavior in llvm::ConstantFoldCompareInstruction.
@@ -3409,6 +3402,8 @@
   if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
     return V;
 
+  // TODO: Sink/common this with other potentially expensive calls that use
+  //       ValueTracking? See comment below for isKnownNonEqual().
   if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
     return V;
 
@@ -3428,13 +3423,10 @@
       auto LHS_CR = getConstantRangeFromMetadata(
           *LHS_Instr->getMetadata(LLVMContext::MD_range));
 
-      auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR);
-      if (Satisfied_CR.contains(LHS_CR))
+      if (LHS_CR.icmp(Pred, RHS_CR))
         return ConstantInt::getTrue(RHS->getContext());
 
-      auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion(
-                CmpInst::getInversePredicate(Pred), RHS_CR);
-      if (InversedSatisfied_CR.contains(LHS_CR))
+      if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
         return ConstantInt::getFalse(RHS->getContext());
     }
   }
@@ -3617,7 +3609,9 @@
   }
 
   // icmp eq|ne X, Y -> false|true if X != Y
-  if (ICmpInst::isEquality(Pred) &&
+  // This is potentially expensive, and we have already computed known bits
+  // for compares with 0 above, so only try this for a non-zero compare.
+  if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
       isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
     return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
   }
@@ -3634,8 +3628,7 @@
   // Simplify comparisons of related pointers using a powerful, recursive
   // GEP-walk when we have target data available..
   if (LHS->getType()->isPointerTy())
-    if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
-                                     Q.IIQ, LHS, RHS))
+    if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
       return C;
   if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
     if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
@@ -3643,9 +3636,8 @@
               Q.DL.getTypeSizeInBits(CLHS->getType()) &&
           Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
               Q.DL.getTypeSizeInBits(CRHS->getType()))
-        if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
-                                         Q.IIQ, CLHS->getPointerOperand(),
-                                         CRHS->getPointerOperand()))
+        if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
+                                         CRHS->getPointerOperand(), Q))
           return C;
 
   if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
@@ -3728,6 +3720,11 @@
   if (match(RHS, m_NaN()))
     return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
 
+  // fcmp pred x, poison  and  fcmp pred poison, x
+  // fold to poison
+  if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
+    return PoisonValue::get(RetTy);
+
   // fcmp pred x, undef  and  fcmp pred undef, x
   // fold to true if unordered, false if ordered
   if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
@@ -3896,10 +3893,12 @@
   return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
 }
 
-static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
+static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                                      const SimplifyQuery &Q,
                                      bool AllowRefinement,
                                      unsigned MaxRecurse) {
+  assert(!Op->getType()->isVectorTy() && "This is not safe for vectors");
+
   // Trivial replacement.
   if (V == Op)
     return RepOp;
@@ -3909,109 +3908,110 @@
     return nullptr;
 
   auto *I = dyn_cast<Instruction>(V);
-  if (!I)
+  if (!I || !is_contained(I->operands(), Op))
     return nullptr;
 
+  // Replace Op with RepOp in instruction operands.
+  SmallVector<Value *, 8> NewOps(I->getNumOperands());
+  transform(I->operands(), NewOps.begin(),
+            [&](Value *V) { return V == Op ? RepOp : V; });
+
+  if (!AllowRefinement) {
+    // General InstSimplify functions may refine the result, e.g. by returning
+    // a constant for a potentially poison value. To avoid this, implement only
+    // a few non-refining but profitable transforms here.
+
+    if (auto *BO = dyn_cast<BinaryOperator>(I)) {
+      unsigned Opcode = BO->getOpcode();
+      // id op x -> x, x op id -> x
+      if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
+        return NewOps[1];
+      if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
+                                                      /* RHS */ true))
+        return NewOps[0];
+
+      // x & x -> x, x | x -> x
+      if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
+          NewOps[0] == NewOps[1])
+        return NewOps[0];
+    }
+
+    if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
+      // getelementptr x, 0 -> x
+      if (NewOps.size() == 2 && match(NewOps[1], m_Zero()) &&
+          !GEP->isInBounds())
+        return NewOps[0];
+    }
+  } else if (MaxRecurse) {
+    // The simplification queries below may return the original value. Consider:
+    //   %div = udiv i32 %arg, %arg2
+    //   %mul = mul nsw i32 %div, %arg2
+    //   %cmp = icmp eq i32 %mul, %arg
+    //   %sel = select i1 %cmp, i32 %div, i32 undef
+    // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
+    // simplifies back to %arg. This can only happen because %mul does not
+    // dominate %div. To ensure a consistent return value contract, we make sure
+    // that this case returns nullptr as well.
+    auto PreventSelfSimplify = [V](Value *Simplified) {
+      return Simplified != V ? Simplified : nullptr;
+    };
+
+    if (auto *B = dyn_cast<BinaryOperator>(I))
+      return PreventSelfSimplify(SimplifyBinOp(B->getOpcode(), NewOps[0],
+                                               NewOps[1], Q, MaxRecurse - 1));
+
+    if (CmpInst *C = dyn_cast<CmpInst>(I))
+      return PreventSelfSimplify(SimplifyCmpInst(C->getPredicate(), NewOps[0],
+                                                 NewOps[1], Q, MaxRecurse - 1));
+
+    if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
+      return PreventSelfSimplify(SimplifyGEPInst(GEP->getSourceElementType(),
+                                                 NewOps, Q, MaxRecurse - 1));
+
+    if (isa<SelectInst>(I))
+      return PreventSelfSimplify(
+          SimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q,
+                             MaxRecurse - 1));
+    // TODO: We could hand off more cases to instsimplify here.
+  }
+
+  // If all operands are constant after substituting Op for RepOp, then we
+  // can constant fold the instruction.
+  SmallVector<Constant *, 8> ConstOps;
+  for (Value *NewOp : NewOps) {
+    if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
+      ConstOps.push_back(ConstOp);
+    else
+      return nullptr;
+  }
+
   // Consider:
   //   %cmp = icmp eq i32 %x, 2147483647
   //   %add = add nsw i32 %x, 1
   //   %sel = select i1 %cmp, i32 -2147483648, i32 %add
   //
-  // We can't replace %sel with %add unless we strip away the flags (which will
-  // be done in InstCombine).
-  // TODO: This is unsound, because it only catches some forms of refinement.
+  // We can't replace %sel with %add unless we strip away the flags (which
+  // will be done in InstCombine).
+  // TODO: This may be unsound, because it only catches some forms of
+  // refinement.
   if (!AllowRefinement && canCreatePoison(cast<Operator>(I)))
     return nullptr;
 
-  // The simplification queries below may return the original value. Consider:
-  //   %div = udiv i32 %arg, %arg2
-  //   %mul = mul nsw i32 %div, %arg2
-  //   %cmp = icmp eq i32 %mul, %arg
-  //   %sel = select i1 %cmp, i32 %div, i32 undef
-  // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
-  // simplifies back to %arg. This can only happen because %mul does not
-  // dominate %div. To ensure a consistent return value contract, we make sure
-  // that this case returns nullptr as well.
-  auto PreventSelfSimplify = [V](Value *Simplified) {
-    return Simplified != V ? Simplified : nullptr;
-  };
+  if (CmpInst *C = dyn_cast<CmpInst>(I))
+    return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
+                                           ConstOps[1], Q.DL, Q.TLI);
 
-  // If this is a binary operator, try to simplify it with the replaced op.
-  if (auto *B = dyn_cast<BinaryOperator>(I)) {
-    if (MaxRecurse) {
-      if (B->getOperand(0) == Op)
-        return PreventSelfSimplify(SimplifyBinOp(B->getOpcode(), RepOp,
-                                                 B->getOperand(1), Q,
-                                                 MaxRecurse - 1));
-      if (B->getOperand(1) == Op)
-        return PreventSelfSimplify(SimplifyBinOp(B->getOpcode(),
-                                                 B->getOperand(0), RepOp, Q,
-                                                 MaxRecurse - 1));
-    }
-  }
+  if (LoadInst *LI = dyn_cast<LoadInst>(I))
+    if (!LI->isVolatile())
+      return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
 
-  // Same for CmpInsts.
-  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
-    if (MaxRecurse) {
-      if (C->getOperand(0) == Op)
-        return PreventSelfSimplify(SimplifyCmpInst(C->getPredicate(), RepOp,
-                                                   C->getOperand(1), Q,
-                                                   MaxRecurse - 1));
-      if (C->getOperand(1) == Op)
-        return PreventSelfSimplify(SimplifyCmpInst(C->getPredicate(),
-                                                   C->getOperand(0), RepOp, Q,
-                                                   MaxRecurse - 1));
-    }
-  }
-
-  // Same for GEPs.
-  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
-    if (MaxRecurse) {
-      SmallVector<Value *, 8> NewOps(GEP->getNumOperands());
-      transform(GEP->operands(), NewOps.begin(),
-                [&](Value *V) { return V == Op ? RepOp : V; });
-      return PreventSelfSimplify(SimplifyGEPInst(GEP->getSourceElementType(),
-                                                 NewOps, Q, MaxRecurse - 1));
-    }
-  }
-
-  // TODO: We could hand off more cases to instsimplify here.
-
-  // If all operands are constant after substituting Op for RepOp then we can
-  // constant fold the instruction.
-  if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
-    // Build a list of all constant operands.
-    SmallVector<Constant *, 8> ConstOps;
-    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
-      if (I->getOperand(i) == Op)
-        ConstOps.push_back(CRepOp);
-      else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
-        ConstOps.push_back(COp);
-      else
-        break;
-    }
-
-    // All operands were constants, fold it.
-    if (ConstOps.size() == I->getNumOperands()) {
-      if (CmpInst *C = dyn_cast<CmpInst>(I))
-        return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
-                                               ConstOps[1], Q.DL, Q.TLI);
-
-      if (LoadInst *LI = dyn_cast<LoadInst>(I))
-        if (!LI->isVolatile())
-          return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
-
-      return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
-    }
-  }
-
-  return nullptr;
+  return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
 }
 
-Value *llvm::SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
+Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                                     const SimplifyQuery &Q,
                                     bool AllowRefinement) {
-  return ::SimplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
+  return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
                                   RecursionLimit);
 }
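
The PreventSelfSimplify lambda above encodes a contract that is easy to violate when reimplementing this kind of substitution: the simplifier must never report the queried value as its own simplification. A standalone sketch of that guard, with opaque pointers standing in for llvm::Value (toy code, not the LLVM API):

    #include <cassert>
    #include <functional>

    // Returns the simplified value, or null if simplification only found the
    // value we started from (no progress, per the contract above).
    static const void *
    guardSelfSimplify(const void *Original,
                      const std::function<const void *()> &Simplify) {
      const void *Simplified = Simplify();
      return Simplified != Original ? Simplified : nullptr;
    }

    int main() {
      int A = 1, B = 2;
      assert(guardSelfSimplify(&A, [&] { return (const void *)&B; }) == &B);
      assert(guardSelfSimplify(&A, [&] { return (const void *)&A; }) == nullptr);
    }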
 
@@ -4133,17 +4133,17 @@
   // Note that the equivalence/replacement opportunity does not hold for vectors
   // because each element of a vector select is chosen independently.
   if (Pred == ICmpInst::ICMP_EQ && !CondVal->getType()->isVectorTy()) {
-    if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
+    if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
                                /* AllowRefinement */ false, MaxRecurse) ==
             TrueVal ||
-        SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q,
+        simplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q,
                                /* AllowRefinement */ false, MaxRecurse) ==
             TrueVal)
       return FalseVal;
-    if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
+    if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
                                /* AllowRefinement */ true, MaxRecurse) ==
             FalseVal ||
-        SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q,
+        simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q,
                                /* AllowRefinement */ true, MaxRecurse) ==
             FalseVal)
       return FalseVal;
@@ -4192,17 +4192,21 @@
       if (auto *FalseC = dyn_cast<Constant>(FalseVal))
         return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);
 
+    // select poison, X, Y -> poison
+    if (isa<PoisonValue>(CondC))
+      return PoisonValue::get(TrueVal->getType());
+
     // select undef, X, Y -> X or Y
     if (Q.isUndefValue(CondC))
       return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
 
-    // TODO: Vector constants with undef elements don't simplify.
-
-    // select true, X, Y  -> X
-    if (CondC->isAllOnesValue())
+    // select true,  X, Y --> X
+    // select false, X, Y --> Y
+    // For vectors, allow undef/poison elements in the condition to match the
+    // defined elements, so we can eliminate the select.
+    if (match(CondC, m_One()))
       return TrueVal;
-    // select false, X, Y -> Y
-    if (CondC->isNullValue())
+    if (match(CondC, m_Zero()))
       return FalseVal;
   }
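
Collapsed into one place, the constant-condition cases above order themselves from strongest to weakest: poison wins outright, undef may choose either arm, and a known true/false condition (with undef/poison lanes tolerated by m_One/m_Zero) picks its arm. A toy scalar model, ignoring the vector-lane and isa<Constant> subtleties:

    #include <cassert>

    enum class Cond { False, True, Undef, Poison };

    // Toy scalar model of the constant-condition select folds.
    static int foldSelect(Cond C, int TrueVal, int FalseVal, int PoisonVal) {
      switch (C) {
      case Cond::Poison: return PoisonVal; // select poison, X, Y -> poison
      case Cond::Undef:  return FalseVal;  // free to pick either arm
      case Cond::True:   return TrueVal;   // select true,  X, Y -> X
      case Cond::False:  return FalseVal;  // select false, X, Y -> Y
      }
      return PoisonVal; // unreachable; keeps compilers quiet
    }

    int main() {
      assert(foldSelect(Cond::True, 1, 2, -1) == 1);
      assert(foldSelect(Cond::False, 1, 2, -1) == 2);
      assert(foldSelect(Cond::Poison, 1, 2, -1) == -1);
    }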
 
@@ -4219,15 +4223,20 @@
   if (TrueVal == FalseVal)
     return TrueVal;
 
+  // If the true or false value is poison, we can fold to the other value.
   // If the true or false value is undef, we can fold to the other value as
   // long as the other value isn't poison.
-  // select ?, undef, X -> X
-  if (Q.isUndefValue(TrueVal) &&
-      isGuaranteedNotToBeUndefOrPoison(FalseVal, Q.AC, Q.CxtI, Q.DT))
+  // select ?, poison, X -> X
+  // select ?, undef,  X -> X
+  if (isa<PoisonValue>(TrueVal) ||
+      (Q.isUndefValue(TrueVal) &&
+       isGuaranteedNotToBePoison(FalseVal, Q.AC, Q.CxtI, Q.DT)))
     return FalseVal;
-  // select ?, X, undef -> X
-  if (Q.isUndefValue(FalseVal) &&
-      isGuaranteedNotToBeUndefOrPoison(TrueVal, Q.AC, Q.CxtI, Q.DT))
+  // select ?, X, poison -> X
+  // select ?, X, undef  -> X
+  if (isa<PoisonValue>(FalseVal) ||
+      (Q.isUndefValue(FalseVal) &&
+       isGuaranteedNotToBePoison(TrueVal, Q.AC, Q.CxtI, Q.DT)))
     return TrueVal;
 
   // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
@@ -4249,11 +4258,11 @@
       // one element is undef, choose the defined element as the safe result.
       if (TEltC == FEltC)
         NewC.push_back(TEltC);
-      else if (Q.isUndefValue(TEltC) &&
-               isGuaranteedNotToBeUndefOrPoison(FEltC))
+      else if (isa<PoisonValue>(TEltC) ||
+               (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
         NewC.push_back(FEltC);
-      else if (Q.isUndefValue(FEltC) &&
-               isGuaranteedNotToBeUndefOrPoison(TEltC))
+      else if (isa<PoisonValue>(FEltC) ||
+               (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
         NewC.push_back(TEltC);
       else
         break;
@@ -4299,10 +4308,14 @@
   // Compute the (pointer) type returned by the GEP instruction.
   Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
   Type *GEPTy = PointerType::get(LastType, AS);
-  if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
-    GEPTy = VectorType::get(GEPTy, VT->getElementCount());
-  else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType()))
-    GEPTy = VectorType::get(GEPTy, VT->getElementCount());
+  for (Value *Op : Ops) {
+    // If one of the operands is a vector, the result type is a vector of
+    // pointers. All vector operands must have the same number of elements.
+    if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
+      GEPTy = VectorType::get(GEPTy, VT->getElementCount());
+      break;
+    }
+  }
 
   // getelementptr poison, idx -> poison
   // getelementptr baseptr, poison -> poison
@@ -4312,7 +4325,10 @@
   if (Q.isUndefValue(Ops[0]))
     return UndefValue::get(GEPTy);
 
-  bool IsScalableVec = isa<ScalableVectorType>(SrcTy);
+  bool IsScalableVec =
+      isa<ScalableVectorType>(SrcTy) || any_of(Ops, [](const Value *V) {
+        return isa<ScalableVectorType>(V->getType());
+      });
 
   if (Ops.size() == 2) {
     // getelementptr P, 0 -> P.
@@ -4332,40 +4348,32 @@
       // doesn't truncate the pointers.
       if (Ops[1]->getType()->getScalarSizeInBits() ==
           Q.DL.getPointerSizeInBits(AS)) {
-        auto PtrToInt = [GEPTy](Value *P) -> Value * {
-          Value *Temp;
-          if (match(P, m_PtrToInt(m_Value(Temp))))
-            if (Temp->getType() == GEPTy)
-              return Temp;
-          return nullptr;
+        auto CanSimplify = [GEPTy, &P, V = Ops[0]]() -> bool {
+          return P->getType() == GEPTy &&
+                 getUnderlyingObject(P) == getUnderlyingObject(V);
         };
-
-        // FIXME: The following transforms are only legal if P and V have the
-        // same provenance (PR44403). Check whether getUnderlyingObject() is
-        // the same?
-
         // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
         if (TyAllocSize == 1 &&
-            match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
-          if (Value *R = PtrToInt(P))
-            return R;
+            match(Ops[1], m_Sub(m_PtrToInt(m_Value(P)),
+                                m_PtrToInt(m_Specific(Ops[0])))) &&
+            CanSimplify())
+          return P;
 
-        // getelementptr V, (ashr (sub P, V), C) -> Q
-        // if P points to a type of size 1 << C.
-        if (match(Ops[1],
-                  m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
-                         m_ConstantInt(C))) &&
-            TyAllocSize == 1ULL << C)
-          if (Value *R = PtrToInt(P))
-            return R;
+        // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
+        // size 1 << C.
+        if (match(Ops[1], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
+                                       m_PtrToInt(m_Specific(Ops[0]))),
+                                 m_ConstantInt(C))) &&
+            TyAllocSize == 1ULL << C && CanSimplify())
+          return P;
 
-        // getelementptr V, (sdiv (sub P, V), C) -> Q
-        // if P points to a type of size C.
-        if (match(Ops[1],
-                  m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
-                         m_SpecificInt(TyAllocSize))))
-          if (Value *R = PtrToInt(P))
-            return R;
+        // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
+        // size C.
+        if (match(Ops[1], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
+                                       m_PtrToInt(m_Specific(Ops[0]))),
+                                 m_SpecificInt(TyAllocSize))) &&
+            CanSimplify())
+          return P;
       }
     }
   }
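
The arithmetic behind these three folds is plain pointer recomputation; what the new CanSimplify lambda adds is the provenance requirement from PR44403 (same type and same underlying object). A worked example of the size-1 case in plain C++, with hypothetical pointers into one buffer:

    #include <cassert>
    #include <cstdint>

    int main() {
      char Buf[16];
      char *V = Buf, *P = Buf + 7; // same underlying object
      // sub (ptrtoint P), (ptrtoint V): the byte distance between them.
      intptr_t Off = (intptr_t)P - (intptr_t)V;
      // getelementptr i8, V, Off recomputes P, so the GEP folds to P.
      assert(V + Off == P);
      // The ashr/sdiv variants only divide the distance by the element size
      // first; the GEP's implicit scaling multiplies it back.
    }
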
@@ -4525,30 +4533,33 @@
     if (auto *CIdx = dyn_cast<Constant>(Idx))
       return ConstantExpr::getExtractElement(CVec, CIdx);
 
-    // The index is not relevant if our vector is a splat.
-    if (auto *Splat = CVec->getSplatValue())
-      return Splat;
-
     if (Q.isUndefValue(Vec))
       return UndefValue::get(VecVTy->getElementType());
   }
 
-  // If extracting a specified index from the vector, see if we can recursively
-  // find a previously computed scalar that was inserted into the vector.
-  if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
-    // For fixed-length vector, fold into undef if index is out of bounds.
-    if (isa<FixedVectorType>(VecVTy) &&
-        IdxC->getValue().uge(cast<FixedVectorType>(VecVTy)->getNumElements()))
-      return PoisonValue::get(VecVTy->getElementType());
-    if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
-      return Elt;
-  }
-
   // An undef extract index can be arbitrarily chosen to be an out-of-range
   // index value, which would result in the instruction being poison.
   if (Q.isUndefValue(Idx))
     return PoisonValue::get(VecVTy->getElementType());
 
+  // If extracting a specified index from the vector, see if we can recursively
+  // find a previously computed scalar that was inserted into the vector.
+  if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
+    // For fixed-length vectors, an out-of-bounds index folds to poison.
+    unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
+    if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
+      return PoisonValue::get(VecVTy->getElementType());
+    // Handle case where an element is extracted from a splat.
+    if (IdxC->getValue().ult(MinNumElts))
+      if (auto *Splat = getSplatValue(Vec))
+        return Splat;
+    if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
+      return Elt;
+  } else {
+    // The index is not relevant if our vector is a splat.
+    if (Value *Splat = getSplatValue(Vec))
+      return Splat;
+  }
   return nullptr;
 }
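
The reordering above is deliberate: an undef or out-of-range extract index is poison even when the vector is a splat, so those checks must fire before the splat shortcut. A toy fixed-length model of the rule:

    #include <cassert>
    #include <optional>
    #include <vector>

    // Extract from a splat vector: out of range folds to "poison" (modelled
    // as nullopt); any in-range lane yields the splat value.
    static std::optional<int> extractFromSplat(const std::vector<int> &Lanes,
                                               size_t Idx) {
      if (Idx >= Lanes.size())
        return std::nullopt; // poison beats the splat shortcut
      return Lanes[0];       // every defined lane is the same
    }

    int main() {
      std::vector<int> Splat(4, 42);
      assert(extractFromSplat(Splat, 2) == 42);
      assert(!extractFromSplat(Splat, 9));
    }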
 
@@ -4558,7 +4569,8 @@
 }
 
 /// See if we can fold the given phi. If not, returns null.
-static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) {
+static Value *SimplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
+                              const SimplifyQuery &Q) {
   // WARNING: no matter how worthwhile it may seem, we can not perform PHI CSE
   //          here, because the PHI we may succeed simplifying to was not
   //          def-reachable from the original PHI!
@@ -4567,7 +4579,7 @@
   // with the common value.
   Value *CommonValue = nullptr;
   bool HasUndefInput = false;
-  for (Value *Incoming : PN->incoming_values()) {
+  for (Value *Incoming : IncomingValues) {
     // If the incoming value is the phi node itself, it can safely be skipped.
     if (Incoming == PN) continue;
     if (Q.isUndefValue(Incoming)) {
@@ -4844,11 +4856,17 @@
 }
 
 /// Perform folds that are common to any floating-point operation. This implies
-/// transforms based on undef/NaN because the operation itself makes no
+/// transforms based on poison/undef/NaN because the operation itself makes no
 /// difference to the result.
-static Constant *simplifyFPOp(ArrayRef<Value *> Ops,
-                              FastMathFlags FMF,
-                              const SimplifyQuery &Q) {
+static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
+                              const SimplifyQuery &Q,
+                              fp::ExceptionBehavior ExBehavior,
+                              RoundingMode Rounding) {
+  // Poison is independent of anything else. It always propagates from an
+  // operand to a math result.
+  if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
+    return PoisonValue::get(Ops[0]->getType());
+
   for (Value *V : Ops) {
     bool IsNan = match(V, m_NaN());
     bool IsInf = match(V, m_Inf());
@@ -4862,21 +4880,33 @@
     if (FMF.noInfs() && (IsInf || IsUndef))
       return PoisonValue::get(V->getType());
 
-    if (IsUndef || IsNan)
-      return propagateNaN(cast<Constant>(V));
+    if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
+      if (IsUndef || IsNan)
+        return propagateNaN(cast<Constant>(V));
+    } else if (ExBehavior != fp::ebStrict) {
+      if (IsNan)
+        return propagateNaN(cast<Constant>(V));
+    }
   }
   return nullptr;
 }
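
The exception-behavior gating above reduces to a small decision table: the default environment folds both undef and NaN operands to NaN, a non-default but non-strict environment folds only a known NaN (an undef operand could be chosen to be a signaling NaN), and strict mode folds nothing because the operation may raise an exception. A sketch with a stand-in enum rather than the llvm::fp types:

    #include <cassert>

    enum class ExBehavior { Ignore, MayTrap, Strict };

    // May a NaN (or undef) operand be folded straight to a NaN result?
    static bool canFoldToNaN(bool DefaultFPEnv, ExBehavior EB,
                             bool OperandIsUndef) {
      if (DefaultFPEnv)
        return true;            // undef and NaN both fold
      if (EB != ExBehavior::Strict)
        return !OperandIsUndef; // only a known NaN folds
      return false;             // strict: the operation may signal
    }

    int main() {
      assert(canFoldToNaN(true, ExBehavior::Ignore, true));
      assert(canFoldToNaN(false, ExBehavior::MayTrap, false));
      assert(!canFoldToNaN(false, ExBehavior::MayTrap, true));
      assert(!canFoldToNaN(false, ExBehavior::Strict, false));
    }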
 
 /// Given operands for an FAdd, see if we can fold the result.  If not, this
 /// returns null.
-static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                               const SimplifyQuery &Q, unsigned MaxRecurse) {
-  if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
+static Value *
+SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+                 const SimplifyQuery &Q, unsigned MaxRecurse,
+                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
+                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
+  if (isDefaultFPEnvironment(ExBehavior, Rounding))
+    if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
+      return C;
+
+  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
     return C;
 
-  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
-    return C;
+  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
+    return nullptr;
 
   // fadd X, -0 ==> X
   if (match(Op1, m_NegZeroFP()))
@@ -4917,13 +4947,20 @@
 
 /// Given operands for an FSub, see if we can fold the result.  If not, this
 /// returns null.
-static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                               const SimplifyQuery &Q, unsigned MaxRecurse) {
-  if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
+static Value *
+SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+                 const SimplifyQuery &Q, unsigned MaxRecurse,
+                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
+                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
+  if (isDefaultFPEnvironment(ExBehavior, Rounding))
+    if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
+      return C;
+
+  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
     return C;
 
-  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
-    return C;
+  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
+    return nullptr;
 
   // fsub X, +0 ==> X
   if (match(Op1, m_PosZeroFP()))
@@ -4963,10 +5000,15 @@
 }
 
 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
-                              const SimplifyQuery &Q, unsigned MaxRecurse) {
-  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
+                              const SimplifyQuery &Q, unsigned MaxRecurse,
+                              fp::ExceptionBehavior ExBehavior,
+                              RoundingMode Rounding) {
+  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
     return C;
 
+  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
+    return nullptr;
+
   // fmul X, 1.0 ==> X
   if (match(Op1, m_FPOne()))
     return Op0;
@@ -4996,43 +5038,65 @@
 }
 
 /// Given the operands for an FMul, see if we can fold the result
-static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                               const SimplifyQuery &Q, unsigned MaxRecurse) {
-  if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
-    return C;
+static Value *
+SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+                 const SimplifyQuery &Q, unsigned MaxRecurse,
+                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
+                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
+  if (isDefaultFPEnvironment(ExBehavior, Rounding))
+    if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
+      return C;
 
   // Now apply simplifications that do not require rounding.
-  return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse);
+  return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
 }
 
 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                              const SimplifyQuery &Q) {
-  return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
+                              const SimplifyQuery &Q,
+                              fp::ExceptionBehavior ExBehavior,
+                              RoundingMode Rounding) {
+  return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
+                            Rounding);
 }
 
-
 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                              const SimplifyQuery &Q) {
-  return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit);
+                              const SimplifyQuery &Q,
+                              fp::ExceptionBehavior ExBehavior,
+                              RoundingMode Rounding) {
+  return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
+                            Rounding);
 }
 
 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                              const SimplifyQuery &Q) {
-  return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
+                              const SimplifyQuery &Q,
+                              fp::ExceptionBehavior ExBehavior,
+                              RoundingMode Rounding) {
+  return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
+                            Rounding);
 }
 
 Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
-                             const SimplifyQuery &Q) {
-  return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit);
+                             const SimplifyQuery &Q,
+                             fp::ExceptionBehavior ExBehavior,
+                             RoundingMode Rounding) {
+  return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
+                           Rounding);
 }
 
-static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                               const SimplifyQuery &Q, unsigned) {
-  if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
+static Value *
+SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+                 const SimplifyQuery &Q, unsigned,
+                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
+                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
+  if (isDefaultFPEnvironment(ExBehavior, Rounding))
+    if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
+      return C;
+
+  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
     return C;
 
-  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
-    return C;
+  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
+    return nullptr;
 
   // X / 1.0 -> X
   if (match(Op1, m_FPOne()))
@@ -5067,17 +5131,27 @@
 }
 
 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                              const SimplifyQuery &Q) {
-  return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
+                              const SimplifyQuery &Q,
+                              fp::ExceptionBehavior ExBehavior,
+                              RoundingMode Rounding) {
+  return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
+                            Rounding);
 }
 
-static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                               const SimplifyQuery &Q, unsigned) {
-  if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
+static Value *
+SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+                 const SimplifyQuery &Q, unsigned,
+                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
+                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
+  if (isDefaultFPEnvironment(ExBehavior, Rounding))
+    if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
+      return C;
+
+  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
     return C;
 
-  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
-    return C;
+  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
+    return nullptr;
 
   // Unlike fdiv, the result of frem always matches the sign of the dividend.
   // The constant match may include undef elements in a vector, so return a full
@@ -5095,8 +5169,11 @@
 }
 
 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
-                              const SimplifyQuery &Q) {
-  return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
+                              const SimplifyQuery &Q,
+                              fp::ExceptionBehavior ExBehavior,
+                              RoundingMode Rounding) {
+  return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
+                            Rounding);
 }
 
 //=== Helper functions for higher up the class hierarchy.
@@ -5375,6 +5452,12 @@
       return Op0;
     break;
   }
+  case Intrinsic::experimental_vector_reverse:
+    // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
+    if (match(Op0,
+              m_Intrinsic<Intrinsic::experimental_vector_reverse>(m_Value(X))))
+      return X;
+    break;
   default:
     break;
   }
@@ -5382,16 +5465,6 @@
   return nullptr;
 }
 
-static Intrinsic::ID getMaxMinOpposite(Intrinsic::ID IID) {
-  switch (IID) {
-  case Intrinsic::smax: return Intrinsic::smin;
-  case Intrinsic::smin: return Intrinsic::smax;
-  case Intrinsic::umax: return Intrinsic::umin;
-  case Intrinsic::umin: return Intrinsic::umax;
-  default: llvm_unreachable("Unexpected intrinsic");
-  }
-}
-
 static APInt getMaxMinLimit(Intrinsic::ID IID, unsigned BitWidth) {
   switch (IID) {
   case Intrinsic::smax: return APInt::getSignedMaxValue(BitWidth);
@@ -5431,7 +5504,7 @@
     if (IID0 == IID)
       return MM0;
     // max (min X, Y), X --> X
-    if (IID0 == getMaxMinOpposite(IID))
+    if (IID0 == getInverseMinMaxIntrinsic(IID))
       return Op1;
   }
   return nullptr;
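
The absorption law behind "max (min X, Y), X --> X" is easy to spot-check outside IR, with std::min/std::max standing in for the four intrinsic pairings:

    #include <algorithm>
    #include <cassert>

    int main() {
      // max(min(X, Y), X) == X: min(X, Y) <= X, so the outer max yields X.
      for (int X = -3; X <= 3; ++X)
        for (int Y = -3; Y <= 3; ++Y)
          assert(std::max(std::min(X, Y), X) == X);
    }
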
@@ -5451,6 +5524,20 @@
       return Op0;
     break;
 
+  case Intrinsic::cttz: {
+    Value *X;
+    if (match(Op0, m_Shl(m_One(), m_Value(X))))
+      return X;
+    break;
+  }
+  case Intrinsic::ctlz: {
+    Value *X;
+    if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
+      return X;
+    if (match(Op0, m_AShr(m_Negative(), m_Value())))
+      return Constant::getNullValue(ReturnType);
+    break;
+  }
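
Both new folds recover X by construction of the operand: shl 1, X places the only set bit at position X, and lshr of a sign-bit-set value by X leaves the highest set bit at position BitWidth - 1 - X. A quick check with GCC/Clang builtins on 32-bit values:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t X = 0; X < 32; ++X) {
        // cttz(shl 1, X) == X: the only set bit sits at position X.
        assert((uint32_t)__builtin_ctz(1u << X) == X);
        // ctlz(lshr negative, X) == X: the sign bit lands at bit 31 - X.
        uint32_t Neg = 0x80000001u; // any value with the sign bit set
        assert((uint32_t)__builtin_clz(Neg >> X) == X);
      }
      // ashr of a negative value keeps the sign bit set, so ctlz is 0.
      assert(__builtin_clz((uint32_t)((int32_t)-5 >> 7)) == 0);
    }
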
   case Intrinsic::smax:
   case Intrinsic::smin:
   case Intrinsic::umax:
@@ -5477,7 +5564,7 @@
       // If the constant op is the opposite of the limit value, the other must
       // be larger/smaller or equal. For example:
       // umin(i8 %x, i8 255) --> %x
-      if (*C == getMaxMinLimit(getMaxMinOpposite(IID), BitWidth))
+      if (*C == getMaxMinLimit(getInverseMinMaxIntrinsic(IID), BitWidth))
         return Op0;
 
       // Remove nested call if constant operands allow it. Example:
@@ -5663,6 +5750,19 @@
 
     break;
   }
+  case Intrinsic::experimental_vector_extract: {
+    Type *ReturnType = F->getReturnType();
+
+    // (extract_vector (insert_vector _, X, 0), 0) -> X
+    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
+    Value *X = nullptr;
+    if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
+                       m_Value(), m_Value(X), m_Zero())) &&
+        IdxN == 0 && X->getType() == ReturnType)
+      return X;
+
+    break;
+  }
   default:
     break;
   }
@@ -5719,15 +5819,115 @@
     }
     return nullptr;
   }
+  case Intrinsic::experimental_constrained_fma: {
+    Value *Op0 = Call->getArgOperand(0);
+    Value *Op1 = Call->getArgOperand(1);
+    Value *Op2 = Call->getArgOperand(2);
+    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
+    if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q,
+                                FPI->getExceptionBehavior().getValue(),
+                                FPI->getRoundingMode().getValue()))
+      return V;
+    return nullptr;
+  }
   case Intrinsic::fma:
   case Intrinsic::fmuladd: {
     Value *Op0 = Call->getArgOperand(0);
     Value *Op1 = Call->getArgOperand(1);
     Value *Op2 = Call->getArgOperand(2);
-    if (Value *V = simplifyFPOp({ Op0, Op1, Op2 }, {}, Q))
+    if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q, fp::ebIgnore,
+                                RoundingMode::NearestTiesToEven))
       return V;
     return nullptr;
   }
+  case Intrinsic::smul_fix:
+  case Intrinsic::smul_fix_sat: {
+    Value *Op0 = Call->getArgOperand(0);
+    Value *Op1 = Call->getArgOperand(1);
+    Value *Op2 = Call->getArgOperand(2);
+    Type *ReturnType = F->getReturnType();
+
+    // Canonicalize constant operand as Op1 (ConstantFolding handles the case
+    // when both Op0 and Op1 are constant so we do not care about that special
+    // case here).
+    if (isa<Constant>(Op0))
+      std::swap(Op0, Op1);
+
+    // X * 0 -> 0
+    if (match(Op1, m_Zero()))
+      return Constant::getNullValue(ReturnType);
+
+    // X * undef -> 0
+    if (Q.isUndefValue(Op1))
+      return Constant::getNullValue(ReturnType);
+
+    // X * (1 << Scale) -> X
+    APInt ScaledOne =
+        APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
+                            cast<ConstantInt>(Op2)->getZExtValue());
+    if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
+      return Op0;
+
+    return nullptr;
+  }
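
These folds follow from the fixed-point semantics of llvm.smul.fix: the product is computed at full width and then shifted right by Scale, so 1 << Scale is the fixed-point encoding of 1.0. A toy scalar model (saturation, vectors, and the ScaledOne sign check are omitted):

    #include <cassert>
    #include <cstdint>

    // Toy scalar model of llvm.smul.fix: multiply at double width, then
    // drop Scale fractional bits.
    static int32_t smulFix(int32_t X, int32_t Y, unsigned Scale) {
      return (int32_t)(((int64_t)X * Y) >> Scale);
    }

    int main() {
      const unsigned Scale = 8;
      const int32_t One = 1 << Scale; // fixed-point encoding of 1.0
      assert(smulFix(12345, One, Scale) == 12345); // X * 1.0 -> X
      assert(smulFix(12345, 0, Scale) == 0);       // X * 0   -> 0
    }
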
+  case Intrinsic::experimental_vector_insert: {
+    Value *Vec = Call->getArgOperand(0);
+    Value *SubVec = Call->getArgOperand(1);
+    Value *Idx = Call->getArgOperand(2);
+    Type *ReturnType = F->getReturnType();
+
+    // (insert_vector Y, (extract_vector X, 0), 0) -> X
+    // where: Y is X, or Y is undef
+    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
+    Value *X = nullptr;
+    if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
+                          m_Value(X), m_Zero())) &&
+        (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
+        X->getType() == ReturnType)
+      return X;
+
+    return nullptr;
+  }
+  case Intrinsic::experimental_constrained_fadd: {
+    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
+    return SimplifyFAddInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
+                            FPI->getFastMathFlags(), Q,
+                            FPI->getExceptionBehavior().getValue(),
+                            FPI->getRoundingMode().getValue());
+    break;
+  }
+  case Intrinsic::experimental_constrained_fsub: {
+    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
+    return SimplifyFSubInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
+                            FPI->getFastMathFlags(), Q,
+                            FPI->getExceptionBehavior().getValue(),
+                            FPI->getRoundingMode().getValue());
+    break;
+  }
+  case Intrinsic::experimental_constrained_fmul: {
+    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
+    return SimplifyFMulInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
+                            FPI->getFastMathFlags(), Q,
+                            FPI->getExceptionBehavior().getValue(),
+                            FPI->getRoundingMode().getValue());
+    break;
+  }
+  case Intrinsic::experimental_constrained_fdiv: {
+    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
+    return SimplifyFDivInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
+                            FPI->getFastMathFlags(), Q,
+                            FPI->getExceptionBehavior().getValue(),
+                            FPI->getRoundingMode().getValue());
+    break;
+  }
+  case Intrinsic::experimental_constrained_frem: {
+    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
+    return SimplifyFRemInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
+                            FPI->getFastMathFlags(), Q,
+                            FPI->getExceptionBehavior().getValue(),
+                            FPI->getRoundingMode().getValue());
+    break;
+  }
   default:
     return nullptr;
   }
@@ -5790,162 +5990,223 @@
   return ::SimplifyFreezeInst(Op0, Q);
 }
 
+static Constant *ConstructLoadOperandConstant(Value *Op) {
+  SmallVector<Value *, 4> Worklist;
+  // Invalid IR in unreachable code may contain self-referential values.
+  // Don't infinitely loop.
+  SmallPtrSet<Value *, 4> Visited;
+  Worklist.push_back(Op);
+  while (true) {
+    Value *CurOp = Worklist.back();
+    if (!Visited.insert(CurOp).second)
+      return nullptr;
+    if (isa<Constant>(CurOp))
+      break;
+    if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
+      Worklist.push_back(BC->getOperand(0));
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
+        if (!isa<Constant>(GEP->getOperand(I)))
+          return nullptr;
+      }
+      Worklist.push_back(GEP->getOperand(0));
+    } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
+      if (II->isLaunderOrStripInvariantGroup())
+        Worklist.push_back(II->getOperand(0));
+      else
+        return nullptr;
+    } else {
+      return nullptr;
+    }
+  }
+
+  Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
+  while (!Worklist.empty()) {
+    Value *CurOp = Worklist.pop_back_val();
+    if (isa<BitCastOperator>(CurOp)) {
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
+      SmallVector<Constant *> Idxs;
+      Idxs.reserve(GEP->getNumOperands() - 1);
+      for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
+        Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
+      }
+      NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
+                                             Idxs, GEP->isInBounds(),
+                                             GEP->getInRangeIndex());
+    } else {
+      assert(isa<IntrinsicInst>(CurOp) &&
+             cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
+             "expected invariant group intrinsic");
+      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
+    }
+  }
+  return NewOp;
+}
+
+static Value *SimplifyLoadInst(LoadInst *LI, Value *PtrOp,
+                               const SimplifyQuery &Q) {
+  if (LI->isVolatile())
+    return nullptr;
+
+  // Try to make the load operand a constant, specifically handle
+  // invariant.group intrinsics.
+  auto *PtrOpC = dyn_cast<Constant>(PtrOp);
+  if (!PtrOpC)
+    PtrOpC = ConstructLoadOperandConstant(PtrOp);
+
+  if (PtrOpC)
+    return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
+
+  return nullptr;
+}
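
ConstructLoadOperandConstant is a two-phase walk: phase one peels bitcasts, constant-index GEPs, and invariant.group intrinsics down to a Constant base, recording each step on a worklist; phase two pops the worklist and replays every step as a constant expression. The shape of that pattern, reduced to a toy where each step is just an integer increment (the Visited cycle check is omitted):

    #include <cassert>
    #include <optional>
    #include <vector>

    // A "wrapper" node: either a constant base, or +Add applied to Inner.
    struct Node {
      std::optional<int> Base;
      int Add = 0;
      const Node *Inner = nullptr;
    };

    static std::optional<int> rebuildAsConstant(const Node *N) {
      std::vector<const Node *> Worklist;
      while (!N->Base) {          // phase 1: peel down to the base
        Worklist.push_back(N);
        N = N->Inner;
        if (!N)
          return std::nullopt;    // no constant base: give up
      }
      int Result = *N->Base;
      while (!Worklist.empty()) { // phase 2: replay the recorded steps
        Result += Worklist.back()->Add;
        Worklist.pop_back();
      }
      return Result;
    }

    int main() {
      Node Leaf{5, 0, nullptr};
      Node Mid{std::nullopt, 2, &Leaf};
      Node Top{std::nullopt, 3, &Mid};
      assert(rebuildAsConstant(&Top) == 10);
    }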
+
 /// See if we can compute a simplified version of this instruction.
 /// If not, this returns null.
 
-Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
-                                 OptimizationRemarkEmitter *ORE) {
+static Value *simplifyInstructionWithOperands(Instruction *I,
+                                              ArrayRef<Value *> NewOps,
+                                              const SimplifyQuery &SQ,
+                                              OptimizationRemarkEmitter *ORE) {
   const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
-  Value *Result;
+  Value *Result = nullptr;
 
   switch (I->getOpcode()) {
   default:
-    Result = ConstantFoldInstruction(I, Q.DL, Q.TLI);
+    if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
+      SmallVector<Constant *, 8> NewConstOps(NewOps.size());
+      transform(NewOps, NewConstOps.begin(),
+                [](Value *V) { return cast<Constant>(V); });
+      Result = ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
+    }
     break;
   case Instruction::FNeg:
-    Result = SimplifyFNegInst(I->getOperand(0), I->getFastMathFlags(), Q);
+    Result = SimplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q);
     break;
   case Instruction::FAdd:
-    Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
-                              I->getFastMathFlags(), Q);
+    Result = SimplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
     break;
   case Instruction::Add:
-    Result =
-        SimplifyAddInst(I->getOperand(0), I->getOperand(1),
-                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
-                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
+    Result = SimplifyAddInst(
+        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::FSub:
-    Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
-                              I->getFastMathFlags(), Q);
+    Result = SimplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
     break;
   case Instruction::Sub:
-    Result =
-        SimplifySubInst(I->getOperand(0), I->getOperand(1),
-                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
-                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
+    Result = SimplifySubInst(
+        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::FMul:
-    Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
-                              I->getFastMathFlags(), Q);
+    Result = SimplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
     break;
   case Instruction::Mul:
-    Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifyMulInst(NewOps[0], NewOps[1], Q);
     break;
   case Instruction::SDiv:
-    Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifySDivInst(NewOps[0], NewOps[1], Q);
     break;
   case Instruction::UDiv:
-    Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifyUDivInst(NewOps[0], NewOps[1], Q);
     break;
   case Instruction::FDiv:
-    Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
-                              I->getFastMathFlags(), Q);
+    Result = SimplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
     break;
   case Instruction::SRem:
-    Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifySRemInst(NewOps[0], NewOps[1], Q);
     break;
   case Instruction::URem:
-    Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifyURemInst(NewOps[0], NewOps[1], Q);
     break;
   case Instruction::FRem:
-    Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
-                              I->getFastMathFlags(), Q);
+    Result = SimplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
     break;
   case Instruction::Shl:
-    Result =
-        SimplifyShlInst(I->getOperand(0), I->getOperand(1),
-                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
-                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
+    Result = SimplifyShlInst(
+        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::LShr:
-    Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
+    Result = SimplifyLShrInst(NewOps[0], NewOps[1],
                               Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::AShr:
-    Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
+    Result = SimplifyAShrInst(NewOps[0], NewOps[1],
                               Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
     break;
   case Instruction::And:
-    Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifyAndInst(NewOps[0], NewOps[1], Q);
     break;
   case Instruction::Or:
-    Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifyOrInst(NewOps[0], NewOps[1], Q);
     break;
   case Instruction::Xor:
-    Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifyXorInst(NewOps[0], NewOps[1], Q);
     break;
   case Instruction::ICmp:
-    Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
-                              I->getOperand(0), I->getOperand(1), Q);
+    Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
+                              NewOps[1], Q);
     break;
   case Instruction::FCmp:
-    Result =
-        SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0),
-                         I->getOperand(1), I->getFastMathFlags(), Q);
+    Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
+                              NewOps[1], I->getFastMathFlags(), Q);
     break;
   case Instruction::Select:
-    Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
-                                I->getOperand(2), Q);
+    Result = SimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q);
     break;
   case Instruction::GetElementPtr: {
-    SmallVector<Value *, 8> Ops(I->operands());
     Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
-                             Ops, Q);
+                             NewOps, Q);
     break;
   }
   case Instruction::InsertValue: {
     InsertValueInst *IV = cast<InsertValueInst>(I);
-    Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
-                                     IV->getInsertedValueOperand(),
-                                     IV->getIndices(), Q);
+    Result = SimplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q);
     break;
   }
   case Instruction::InsertElement: {
-    auto *IE = cast<InsertElementInst>(I);
-    Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1),
-                                       IE->getOperand(2), Q);
+    Result = SimplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
     break;
   }
   case Instruction::ExtractValue: {
     auto *EVI = cast<ExtractValueInst>(I);
-    Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
-                                      EVI->getIndices(), Q);
+    Result = SimplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q);
     break;
   }
   case Instruction::ExtractElement: {
-    auto *EEI = cast<ExtractElementInst>(I);
-    Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
-                                        EEI->getIndexOperand(), Q);
+    Result = SimplifyExtractElementInst(NewOps[0], NewOps[1], Q);
     break;
   }
   case Instruction::ShuffleVector: {
     auto *SVI = cast<ShuffleVectorInst>(I);
-    Result =
-        SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
-                                  SVI->getShuffleMask(), SVI->getType(), Q);
+    Result = SimplifyShuffleVectorInst(
+        NewOps[0], NewOps[1], SVI->getShuffleMask(), SVI->getType(), Q);
     break;
   }
   case Instruction::PHI:
-    Result = SimplifyPHINode(cast<PHINode>(I), Q);
+    Result = SimplifyPHINode(cast<PHINode>(I), NewOps, Q);
     break;
   case Instruction::Call: {
+    // TODO: Use NewOps
     Result = SimplifyCall(cast<CallInst>(I), Q);
     break;
   }
   case Instruction::Freeze:
-    Result = SimplifyFreezeInst(I->getOperand(0), Q);
+    Result = llvm::SimplifyFreezeInst(NewOps[0], Q);
     break;
 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
 #include "llvm/IR/Instruction.def"
 #undef HANDLE_CAST_INST
-    Result =
-        SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
+    Result = SimplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q);
     break;
   case Instruction::Alloca:
     // No simplifications for Alloca and it can't be constant folded.
     Result = nullptr;
     break;
+  case Instruction::Load:
+    Result = SimplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
+    break;
   }
 
   /// If called on unreachable code, the above logic may report that the
@@ -5954,6 +6215,21 @@
   return Result == I ? UndefValue::get(I->getType()) : Result;
 }
 
+Value *llvm::SimplifyInstructionWithOperands(Instruction *I,
+                                             ArrayRef<Value *> NewOps,
+                                             const SimplifyQuery &SQ,
+                                             OptimizationRemarkEmitter *ORE) {
+  assert(NewOps.size() == I->getNumOperands() &&
+         "Number of operands should match the instruction!");
+  return ::simplifyInstructionWithOperands(I, NewOps, SQ, ORE);
+}
+
+Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
+                                 OptimizationRemarkEmitter *ORE) {
+  SmallVector<Value *, 8> Ops(I->operands());
+  return ::simplifyInstructionWithOperands(I, Ops, SQ, ORE);
+}
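
A typical caller of the new entry point asks what an instruction would fold to under an operand substitution, without editing the IR first. A hypothetical helper sketched against the API above (assumes the usual LLVM headers and a populated SimplifyQuery):

    // Hypothetical helper, not part of this patch.
    static Value *simplifyWithSubstitutedOperand(Instruction *I, Value *OldOp,
                                                 Value *NewOp,
                                                 const SimplifyQuery &Q) {
      SmallVector<Value *, 8> Ops(I->operands());
      for (Value *&Op : Ops)
        if (Op == OldOp)
          Op = NewOp; // substitute without mutating the instruction
      return SimplifyInstructionWithOperands(I, Ops, Q, /*ORE=*/nullptr);
    }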
+
 /// Implementation of recursive simplification through an instruction's
 /// uses.
 ///
diff --git a/src/llvm-project/llvm/lib/Analysis/LazyBlockFrequencyInfo.cpp b/src/llvm-project/llvm/lib/Analysis/LazyBlockFrequencyInfo.cpp
index 6107cac..636baf8 100644
--- a/src/llvm-project/llvm/lib/Analysis/LazyBlockFrequencyInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LazyBlockFrequencyInfo.cpp
@@ -45,8 +45,8 @@
   // We require DT so it's available when LI is available. The LI updating code
   // asserts that DT is also present, so if we don't make sure that we have DT
   // here, that assert will trigger.
-  AU.addRequired<DominatorTreeWrapperPass>();
-  AU.addRequired<LoopInfoWrapperPass>();
+  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
+  AU.addRequiredTransitive<LoopInfoWrapperPass>();
   AU.setPreservesAll();
 }
 
@@ -61,8 +61,8 @@
 
 void LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AnalysisUsage &AU) {
   LazyBranchProbabilityInfoPass::getLazyBPIAnalysisUsage(AU);
-  AU.addRequired<LazyBlockFrequencyInfoPass>();
-  AU.addRequired<LoopInfoWrapperPass>();
+  AU.addRequiredTransitive<LazyBlockFrequencyInfoPass>();
+  AU.addRequiredTransitive<LoopInfoWrapperPass>();
 }
 
 void llvm::initializeLazyBFIPassPass(PassRegistry &Registry) {
diff --git a/src/llvm-project/llvm/lib/Analysis/LazyBranchProbabilityInfo.cpp b/src/llvm-project/llvm/lib/Analysis/LazyBranchProbabilityInfo.cpp
index 8369859..95de494 100644
--- a/src/llvm-project/llvm/lib/Analysis/LazyBranchProbabilityInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LazyBranchProbabilityInfo.cpp
@@ -46,9 +46,9 @@
   // We require DT so it's available when LI is available. The LI updating code
   // asserts that DT is also present, so if we don't make sure that we have DT
   // here, that assert will trigger.
-  AU.addRequired<DominatorTreeWrapperPass>();
-  AU.addRequired<LoopInfoWrapperPass>();
-  AU.addRequired<TargetLibraryInfoWrapperPass>();
+  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
+  AU.addRequiredTransitive<LoopInfoWrapperPass>();
+  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
   AU.setPreservesAll();
 }
 
@@ -63,9 +63,9 @@
 }
 
 void LazyBranchProbabilityInfoPass::getLazyBPIAnalysisUsage(AnalysisUsage &AU) {
-  AU.addRequired<LazyBranchProbabilityInfoPass>();
-  AU.addRequired<LoopInfoWrapperPass>();
-  AU.addRequired<TargetLibraryInfoWrapperPass>();
+  AU.addRequiredTransitive<LazyBranchProbabilityInfoPass>();
+  AU.addRequiredTransitive<LoopInfoWrapperPass>();
+  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
 }
 
 void llvm::initializeLazyBPIPassPass(PassRegistry &Registry) {
diff --git a/src/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp b/src/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp
index f2c85a6..8f87552 100644
--- a/src/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LazyCallGraph.cpp
@@ -241,7 +241,7 @@
 }
 #endif
 
-#ifndef NDEBUG
+#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
 void LazyCallGraph::SCC::verify() {
   assert(OuterRefSCC && "Can't have a null RefSCC!");
   assert(!Nodes.empty() && "Can't have an empty SCC!");
@@ -333,7 +333,7 @@
 }
 #endif
 
-#ifndef NDEBUG
+#if !defined(NDEBUG) || defined(EXPENSIVE_CHECKS)
 void LazyCallGraph::RefSCC::verify() {
   assert(G && "Can't have a null graph!");
   assert(!SCCs.empty() && "Can't have an empty SCC!");
@@ -588,9 +588,7 @@
   assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
   SmallVector<SCC *, 1> DeletedSCCs;
 
-#ifndef NDEBUG
-  // In a debug build, verify the RefSCC is valid to start with and when this
-  // routine finishes.
+#ifdef EXPENSIVE_CHECKS
   verify();
   auto VerifyOnExit = make_scope_exit([&]() { verify(); });
 #endif
@@ -620,7 +618,7 @@
 
   // Compute the SCCs which (transitively) reach the source.
   auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
-#ifndef NDEBUG
+#ifdef EXPENSIVE_CHECKS
     // Check that the RefSCC is still valid before computing this, as the
     // results will be nonsensical if we've broken its invariants.
     verify();
@@ -646,7 +644,7 @@
   // but because this is forward connectivity we just "recurse" through the
   // edges.
   auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
-#ifndef NDEBUG
+#ifdef EXPENSIVE_CHECKS
     // Check that the RefSCC is still valid before computing this, as the
     // results will be nonsensical if we've broken its invariants.
     verify();
@@ -694,7 +692,7 @@
     return false; // No new cycle.
   }
 
-#ifndef NDEBUG
+#ifdef EXPENSIVE_CHECKS
   // Before merging, check that the RefSCC remains valid after all the
   // postorder updates.
   verify();
@@ -735,9 +733,7 @@
                                                            Node &TargetN) {
   assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
 
-#ifndef NDEBUG
-  // In a debug build, verify the RefSCC is valid to start with and when this
-  // routine finishes.
+#ifdef EXPENSIVE_CHECKS
   verify();
   auto VerifyOnExit = make_scope_exit([&]() { verify(); });
 #endif
@@ -757,9 +753,7 @@
 LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
   assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
 
-#ifndef NDEBUG
-  // In a debug build, verify the RefSCC is valid to start with and when this
-  // routine finishes.
+#ifdef EXPENSIVE_CHECKS
   verify();
   auto VerifyOnExit = make_scope_exit([&]() { verify(); });
 #endif
@@ -956,8 +950,7 @@
   // just flip the edge here.
   SourceN->setEdgeKind(TargetN, Edge::Call);
 
-#ifndef NDEBUG
-  // Check that the RefSCC is still valid.
+#ifdef EXPENSIVE_CHECKS
   verify();
 #endif
 }
@@ -978,8 +971,7 @@
   // just flip the edge here.
   SourceN->setEdgeKind(TargetN, Edge::Ref);
 
-#ifndef NDEBUG
-  // Check that the RefSCC is still valid.
+#ifdef EXPENSIVE_CHECKS
   verify();
 #endif
 }
@@ -991,8 +983,7 @@
 
   SourceN->insertEdgeInternal(TargetN, Edge::Ref);
 
-#ifndef NDEBUG
-  // Check that the RefSCC is still valid.
+#ifdef EXPENSIVE_CHECKS
   verify();
 #endif
 }
@@ -1011,8 +1002,7 @@
          "Target must be a descendant of the Source.");
 #endif
 
-#ifndef NDEBUG
-  // Check that the RefSCC is still valid.
+#ifdef EXPENSIVE_CHECKS
   verify();
 #endif
 }
@@ -1029,9 +1019,7 @@
 
   SmallVector<RefSCC *, 1> DeletedRefSCCs;
 
-#ifndef NDEBUG
-  // In a debug build, verify the RefSCC is valid to start with and when this
-  // routine finishes.
+#ifdef EXPENSIVE_CHECKS
   verify();
   auto VerifyOnExit = make_scope_exit([&]() { verify(); });
 #endif
@@ -1167,9 +1155,7 @@
   assert(G->lookupRefSCC(TargetN) != this &&
          "The target must not be a member of this RefSCC");
 
-#ifndef NDEBUG
-  // In a debug build, verify the RefSCC is valid to start with and when this
-  // routine finishes.
+#ifdef EXPENSIVE_CHECKS
   verify();
   auto VerifyOnExit = make_scope_exit([&]() { verify(); });
 #endif
@@ -1186,10 +1172,10 @@
   // We return a list of the resulting *new* RefSCCs in post-order.
   SmallVector<RefSCC *, 1> Result;
 
-#ifndef NDEBUG
-  // In a debug build, verify the RefSCC is valid to start with and that either
-  // we return an empty list of result RefSCCs and this RefSCC remains valid,
-  // or we return new RefSCCs and this RefSCC is dead.
+#ifdef EXPENSIVE_CHECKS
+  // Verify the RefSCC is valid to start with and that either we return an empty
+  // list of result RefSCCs and this RefSCC remains valid, or we return new
+  // RefSCCs and this RefSCC is dead.
   verify();
   auto VerifyOnExit = make_scope_exit([&]() {
     // If we didn't replace our RefSCC with new ones, check that this one
@@ -1407,7 +1393,7 @@
   SCCs.clear();
   SCCIndices.clear();
 
-#ifndef NDEBUG
+#ifdef EXPENSIVE_CHECKS
   // Verify the new RefSCCs we've built.
   for (RefSCC *RC : Result)
     RC->verify();
@@ -1419,11 +1405,9 @@
 
 void LazyCallGraph::RefSCC::insertTrivialCallEdge(Node &SourceN,
                                                   Node &TargetN) {
-#ifndef NDEBUG
-  // Check that the RefSCC is still valid when we finish.
+#ifdef EXPENSIVE_CHECKS
   auto ExitVerifier = make_scope_exit([this] { verify(); });
 
-#ifdef EXPENSIVE_CHECKS
   // Check that we aren't breaking some invariants of the SCC graph. Note that
   // this is quadratic in the number of edges in the call graph!
   SCC &SourceC = *G->lookupSCC(SourceN);
@@ -1431,8 +1415,7 @@
   if (&SourceC != &TargetC)
     assert(SourceC.isAncestorOf(TargetC) &&
            "Call edge is not trivial in the SCC graph!");
-#endif // EXPENSIVE_CHECKS
-#endif // NDEBUG
+#endif
 
   // First insert it into the source or find the existing edge.
   auto InsertResult =
@@ -1450,19 +1433,16 @@
 }
 
 void LazyCallGraph::RefSCC::insertTrivialRefEdge(Node &SourceN, Node &TargetN) {
-#ifndef NDEBUG
-  // Check that the RefSCC is still valid when we finish.
+#ifdef EXPENSIVE_CHECKS
   auto ExitVerifier = make_scope_exit([this] { verify(); });
 
-#ifdef EXPENSIVE_CHECKS
   // Check that we aren't breaking some invariants of the RefSCC graph.
   RefSCC &SourceRC = *G->lookupRefSCC(SourceN);
   RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
   if (&SourceRC != &TargetRC)
     assert(SourceRC.isAncestorOf(TargetRC) &&
            "Ref edge is not trivial in the RefSCC graph!");
-#endif // EXPENSIVE_CHECKS
-#endif // NDEBUG
+#endif
 
   // First insert it into the source or find the existing edge.
   auto InsertResult =
@@ -1478,8 +1458,7 @@
 void LazyCallGraph::RefSCC::replaceNodeFunction(Node &N, Function &NewF) {
   Function &OldF = N.getFunction();
 
-#ifndef NDEBUG
-  // Check that the RefSCC is still valid when we finish.
+#ifdef EXPENSIVE_CHECKS
   auto ExitVerifier = make_scope_exit([this] { verify(); });
 
   assert(G->lookupRefSCC(N) == this &&
@@ -1638,7 +1617,7 @@
   SCC *OriginalC = lookupSCC(OriginalN);
   RefSCC *OriginalRC = lookupRefSCC(OriginalN);
 
-#ifndef NDEBUG
+#ifdef EXPENSIVE_CHECKS
   OriginalRC->verify();
   auto VerifyOnExit = make_scope_exit([&]() { OriginalRC->verify(); });
 #endif
@@ -1717,14 +1696,12 @@
   Node &OriginalN = get(OriginalFunction);
   RefSCC *OriginalRC = lookupRefSCC(OriginalN);
 
-#ifndef NDEBUG
+#ifdef EXPENSIVE_CHECKS
   OriginalRC->verify();
   auto VerifyOnExit = make_scope_exit([&]() {
     OriginalRC->verify();
-#ifdef EXPENSIVE_CHECKS
     for (Function *NewFunction : NewFunctions)
       lookupRefSCC(get(*NewFunction))->verify();
-#endif
   });
 #endif
 
@@ -1979,7 +1956,7 @@
         (void)Inserted;
         assert(Inserted && "Cannot already have this RefSCC in the index map!");
         PostOrderRefSCCs.push_back(NewRC);
-#ifndef NDEBUG
+#ifdef EXPENSIVE_CHECKS
         NewRC->verify();
 #endif
       });
diff --git a/src/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp b/src/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
index ba2b6fe..1dababa 100644
--- a/src/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -658,7 +658,7 @@
                            Val->getType()->getPointerAddressSpace()))
     return false;
 
-  Val = getUnderlyingObject(Val);
+  Val = Val->stripInBoundsOffsets();
   return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
     NonNullPointerSet NonNullPointers;
     for (Instruction &I : *BB)
@@ -673,7 +673,7 @@
 
   // If this is the entry block, we must be asking about an argument.  The
   // value is overdefined.
-  if (BB == &BB->getParent()->getEntryBlock()) {
+  if (BB->isEntryBlock()) {
     assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
     return ValueLatticeElement::getOverdefined();
   }
@@ -687,8 +687,8 @@
   // find a path to function entry.  TODO: We should consider explicitly
   // canonicalizing to make this true rather than relying on this happy
   // accident.
-  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
-    Optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, *PI, BB);
+  for (BasicBlock *Pred : predecessors(BB)) {
+    Optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
     if (!EdgeResult)
       // Explore that input, then return here
       return None;
@@ -801,22 +801,12 @@
     return None;
   ValueLatticeElement &TrueVal = *OptTrueVal;
 
-  // If we hit overdefined, don't ask more queries.  We want to avoid poisoning
-  // extra slots in the table if we can.
-  if (TrueVal.isOverdefined())
-    return ValueLatticeElement::getOverdefined();
-
   Optional<ValueLatticeElement> OptFalseVal =
       getBlockValue(SI->getFalseValue(), BB);
   if (!OptFalseVal)
     return None;
   ValueLatticeElement &FalseVal = *OptFalseVal;
 
-  // If we hit overdefined, don't ask more queries.  We want to avoid poisoning
-  // extra slots in the table if we can.
-  if (FalseVal.isOverdefined())
-    return ValueLatticeElement::getOverdefined();
-
   if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
     const ConstantRange &TrueCR = TrueVal.getConstantRange();
     const ConstantRange &FalseCR = FalseVal.getConstantRange();
@@ -875,48 +865,6 @@
   FalseVal = intersect(FalseVal,
                        getValueFromCondition(SI->getFalseValue(), Cond, false));
 
-  // Handle clamp idioms such as:
-  //   %24 = constantrange<0, 17>
-  //   %39 = icmp eq i32 %24, 0
-  //   %40 = add i32 %24, -1
-  //   %siv.next = select i1 %39, i32 16, i32 %40
-  //   %siv.next = constantrange<0, 17> not <-1, 17>
-  // In general, this can handle any clamp idiom which tests the edge
-  // condition via an equality or inequality.
-  if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
-    ICmpInst::Predicate Pred = ICI->getPredicate();
-    Value *A = ICI->getOperand(0);
-    if (ConstantInt *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
-      auto addConstants = [](ConstantInt *A, ConstantInt *B) {
-        assert(A->getType() == B->getType());
-        return ConstantInt::get(A->getType(), A->getValue() + B->getValue());
-      };
-      // See if either input is A + C2, subject to the constraint from the
-      // condition that A != C when that input is used.  We can assume that
-      // that input doesn't include C + C2.
-      ConstantInt *CIAdded;
-      switch (Pred) {
-      default: break;
-      case ICmpInst::ICMP_EQ:
-        if (match(SI->getFalseValue(), m_Add(m_Specific(A),
-                                             m_ConstantInt(CIAdded)))) {
-          auto ResNot = addConstants(CIBase, CIAdded);
-          FalseVal = intersect(FalseVal,
-                               ValueLatticeElement::getNot(ResNot));
-        }
-        break;
-      case ICmpInst::ICMP_NE:
-        if (match(SI->getTrueValue(), m_Add(m_Specific(A),
-                                            m_ConstantInt(CIAdded)))) {
-          auto ResNot = addConstants(CIBase, CIAdded);
-          TrueVal = intersect(TrueVal,
-                              ValueLatticeElement::getNot(ResNot));
-        }
-        break;
-      };
-    }
-  }
-
   ValueLatticeElement Result = TrueVal;
   Result.mergeIn(FalseVal);
   return Result;
@@ -1042,8 +990,8 @@
     IntrinsicInst *II, BasicBlock *BB) {
   if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
     LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
-                      << "' - overdefined (unknown intrinsic).\n");
-    return ValueLatticeElement::getOverdefined();
+                      << "' - unknown intrinsic.\n");
+    return getFromRangeMetadata(II);
   }
 
   SmallVector<ConstantRange, 2> OpRanges;
@@ -1076,15 +1024,25 @@
   return ValueLatticeElement::getOverdefined();
 }
 
-static bool matchICmpOperand(const APInt *&Offset, Value *LHS, Value *Val,
+static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
                              ICmpInst::Predicate Pred) {
   if (LHS == Val)
     return true;
 
   // Handle range checking idiom produced by InstCombine. We will subtract the
   // offset from the allowed range for RHS in this case.
-  if (match(LHS, m_Add(m_Specific(Val), m_APInt(Offset))))
+  const APInt *C;
+  if (match(LHS, m_Add(m_Specific(Val), m_APInt(C)))) {
+    Offset = *C;
     return true;
+  }
+
+  // Handle the symmetric case. This appears in saturation patterns like
+  // (x == 16) ? 16 : (x + 1).
+  if (match(Val, m_Add(m_Specific(LHS), m_APInt(C)))) {
+    Offset = -*C;
+    return true;
+  }
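// A hand-worked instance of the symmetric case (the saturation idiom
// that previously needed the bespoke clamp matcher deleted above):
//   %c   = icmp eq i32 %x, 16
//   %inc = add i32 %x, 1
//   %sat = select i1 %c, i32 16, i32 %inc
// Asking for Val = %inc on the false edge of %c gives LHS = %x with
// Val = %x + 1, so Offset = -1 and the allowed region "ne 16" for %x
// shifts to "ne 17" for %inc without any special-case code.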
 
   // If (x | y) < C, then (x < C) && (y < C).
   if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
@@ -1101,7 +1059,7 @@
 
 /// Get value range for a "(Val + Offset) Pred RHS" condition.
 static ValueLatticeElement getValueFromSimpleICmpCondition(
-    CmpInst::Predicate Pred, Value *RHS, const APInt *Offset) {
+    CmpInst::Predicate Pred, Value *RHS, const APInt &Offset) {
   ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
                          /*isFullSet=*/true);
   if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
@@ -1112,11 +1070,7 @@
 
   ConstantRange TrueValues =
       ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
-
-  if (Offset)
-    TrueValues = TrueValues.subtract(*Offset);
-
-  return ValueLatticeElement::getRange(std::move(TrueValues));
+  return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
 }
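// A hand-worked instance of the helper above, assuming a 32-bit value
// and the condition (x + 4) ult 10 (illustrative, not from this patch):
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"

llvm::ConstantRange exampleSimpleICmpRange() {
  using namespace llvm;
  ConstantRange RHSRange(APInt(32, 10)); // RHS is the constant 10
  // Allowed region for (x + 4): the unsigned values below 10, i.e. [0, 10).
  ConstantRange Allowed =
      ConstantRange::makeAllowedICmpRegion(CmpInst::ICMP_ULT, RHSRange);
  // Undo the offset to recover x itself: [0, 10) - 4 = [-4, 6).
  return Allowed.subtract(APInt(32, 4));
}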
 
 static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
@@ -1137,10 +1091,11 @@
     }
   }
 
-  if (!Val->getType()->isIntegerTy())
+  Type *Ty = Val->getType();
+  if (!Ty->isIntegerTy())
     return ValueLatticeElement::getOverdefined();
 
-  const APInt *Offset = nullptr;
+  APInt Offset(Ty->getScalarSizeInBits(), 0);
   if (matchICmpOperand(Offset, LHS, Val, EdgePred))
     return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset);
 
@@ -1148,17 +1103,27 @@
   if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
     return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset);
 
-  // If (Val & Mask) == C then all the masked bits are known and we can compute
-  // a value range based on that.
   const APInt *Mask, *C;
-  if (EdgePred == ICmpInst::ICMP_EQ &&
-      match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
+  if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
       match(RHS, m_APInt(C))) {
-    KnownBits Known;
-    Known.Zero = ~*C & *Mask;
-    Known.One = *C & *Mask;
-    return ValueLatticeElement::getRange(
-        ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
+    // If (Val & Mask) == C then all the masked bits are known and we can
+    // compute a value range based on that.
+    if (EdgePred == ICmpInst::ICMP_EQ) {
+      KnownBits Known;
+      Known.Zero = ~*C & *Mask;
+      Known.One = *C & *Mask;
+      return ValueLatticeElement::getRange(
+          ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
+    }
+    // If (Val & Mask) != 0 then the value must be larger than the lowest set
+    // bit of Mask.
+    if (EdgePred == ICmpInst::ICMP_NE && !Mask->isNullValue() &&
+        C->isNullValue()) {
+      unsigned BitWidth = Ty->getIntegerBitWidth();
+      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
+          APInt::getOneBitSet(BitWidth, Mask->countTrailingZeros()),
+          APInt::getNullValue(BitWidth)));
+    }
   }
 
   return ValueLatticeElement::getOverdefined();
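// A hand-worked instance of the new (Val & Mask) != 0 case (illustrative
// values, not from this patch): with Mask = 12 (0b1100) the lowest set
// bit is at index 2, so any Val with (Val & Mask) != 0 is at least 4.
#include "llvm/IR/ConstantRange.h"

llvm::ConstantRange exampleNonZeroMaskRange() {
  using namespace llvm;
  unsigned BitWidth = 32;
  APInt Mask(BitWidth, 12);
  // The wrapping range [4, 0) covers exactly the unsigned values >= 4.
  return ConstantRange::getNonEmpty(
      APInt::getOneBitSet(BitWidth, Mask.countTrailingZeros()),
      APInt::getNullValue(BitWidth));
}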
@@ -1186,20 +1151,20 @@
   return ValueLatticeElement::getRange(NWR);
 }
 
-static ValueLatticeElement
-getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
-                      SmallDenseMap<Value*, ValueLatticeElement> &Visited);
-
-static ValueLatticeElement
+static Optional<ValueLatticeElement>
 getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
-                          SmallDenseMap<Value*, ValueLatticeElement> &Visited) {
-  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
-    return getValueFromICmpCondition(Val, ICI, isTrueDest);
+                          bool isRevisit,
+                          SmallDenseMap<Value *, ValueLatticeElement> &Visited,
+                          SmallVectorImpl<Value *> &Worklist) {
+  if (!isRevisit) {
+    if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
+      return getValueFromICmpCondition(Val, ICI, isTrueDest);
 
-  if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
-    if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
-      if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
-        return getValueFromOverflowCondition(Val, WO, isTrueDest);
+    if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
+      if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
+        if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
+          return getValueFromOverflowCondition(Val, WO, isTrueDest);
+  }
 
   Value *L, *R;
   bool IsAnd;
@@ -1210,46 +1175,63 @@
   else
     return ValueLatticeElement::getOverdefined();
 
-  // Prevent infinite recursion if Cond references itself as in this example:
-  //  Cond: "%tmp4 = and i1 %tmp4, undef"
-  //    BL: "%tmp4 = and i1 %tmp4, undef"
-  //    BR: "i1 undef"
-  if (L == Cond || R == Cond)
-    return ValueLatticeElement::getOverdefined();
+  auto LV = Visited.find(L);
+  auto RV = Visited.find(R);
 
   // if (L && R) -> intersect L and R
   // if (!(L || R)) -> intersect L and R
   // if (L || R) -> union L and R
   // if (!(L && R)) -> union L and R
-  if (isTrueDest ^ IsAnd) {
-    ValueLatticeElement V = getValueFromCondition(Val, L, isTrueDest, Visited);
+  if ((isTrueDest ^ IsAnd) && (LV != Visited.end())) {
+    ValueLatticeElement V = LV->second;
     if (V.isOverdefined())
       return V;
-    V.mergeIn(getValueFromCondition(Val, R, isTrueDest, Visited));
-    return V;
+    if (RV != Visited.end()) {
+      V.mergeIn(RV->second);
+      return V;
+    }
   }
 
-  return intersect(getValueFromCondition(Val, L, isTrueDest, Visited),
-                   getValueFromCondition(Val, R, isTrueDest, Visited));
-}
+  if (LV == Visited.end() || RV == Visited.end()) {
+    assert(!isRevisit);
+    if (LV == Visited.end())
+      Worklist.push_back(L);
+    if (RV == Visited.end())
+      Worklist.push_back(R);
+    return None;
+  }
 
-static ValueLatticeElement
-getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
-                      SmallDenseMap<Value*, ValueLatticeElement> &Visited) {
-  auto I = Visited.find(Cond);
-  if (I != Visited.end())
-    return I->second;
-
-  auto Result = getValueFromConditionImpl(Val, Cond, isTrueDest, Visited);
-  Visited[Cond] = Result;
-  return Result;
+  return intersect(LV->second, RV->second);
 }
 
 ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
                                           bool isTrueDest) {
   assert(Cond && "precondition");
   SmallDenseMap<Value*, ValueLatticeElement> Visited;
-  return getValueFromCondition(Val, Cond, isTrueDest, Visited);
+  SmallVector<Value *> Worklist;
+
+  Worklist.push_back(Cond);
+  do {
+    Value *CurrentCond = Worklist.back();
+    // Insert an Overdefined placeholder into the set to prevent
+    // infinite recursion if the IR contains uses that are not
+    // dominated by their defs, as in this example:
+    //   "%tmp3 = or i1 undef, %tmp4"
+    //   "%tmp4 = or i1 undef, %tmp3"
+    auto Iter =
+        Visited.try_emplace(CurrentCond, ValueLatticeElement::getOverdefined());
+    bool isRevisit = !Iter.second;
+    Optional<ValueLatticeElement> Result = getValueFromConditionImpl(
+        Val, CurrentCond, isTrueDest, isRevisit, Visited, Worklist);
+    if (Result) {
+      Visited[CurrentCond] = *Result;
+      Worklist.pop_back();
+    }
+  } while (!Worklist.empty());
+
+  auto Result = Visited.find(Cond);
+  assert(Result != Visited.end());
+  return Result->second;
 }
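// The rewrite above, reduced to a generic sketch: seed each node with a
// conservative placeholder so cycles terminate, defer nodes whose
// operands are not cached yet, and revisit them once they are. (All
// names here are hypothetical; none of them are LVI's real types.)
#include <optional>
#include <unordered_map>
#include <vector>

template <typename Node, typename Value, typename EvalFn>
Value evaluateIteratively(Node Root, Value Conservative, EvalFn EvalOnce) {
  std::unordered_map<Node, Value> Visited;
  std::vector<Node> Worklist{Root};
  do {
    Node N = Worklist.back();
    // An entry that already exists means this is a revisit.
    bool IsRevisit = !Visited.try_emplace(N, Conservative).second;
    // EvalOnce may push un-cached operands onto the worklist and signal
    // "not ready yet" by returning nullopt; N then stays queued.
    if (std::optional<Value> R = EvalOnce(N, IsRevisit, Visited, Worklist)) {
      Visited[N] = *R;
      Worklist.pop_back();
    }
  } while (!Worklist.empty());
  return Visited.at(Root);
}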
 
 // Return true if Usr has Op as an operand, otherwise false.
@@ -1857,6 +1839,24 @@
   return Unknown;
 }
 
+LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
+                                                      Value *RHS,
+                                                      Instruction *CxtI,
+                                                      bool UseBlockValue) {
+  CmpInst::Predicate Pred = (CmpInst::Predicate)P;
+
+  if (auto *C = dyn_cast<Constant>(RHS))
+    return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
+  if (auto *C = dyn_cast<Constant>(LHS))
+    return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
+                          UseBlockValue);
+
+  // Got two non-Constant values. We could handle them by computing their
+  // constant ranges and applying ConstantRange::icmp(), but so far that
+  // has not appeared to be profitable.
+  return LazyValueInfo::Unknown;
+}
+
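// The approach the comment above alludes to would look roughly like this
// sketch, assuming both ranges came from the underlying range analysis:
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"

// Returns 1/0 when the predicate provably holds/fails for every pair of
// values drawn from the two ranges, and -1 when neither is provable.
int comparePredicateOverRanges(llvm::CmpInst::Predicate Pred,
                               const llvm::ConstantRange &L,
                               const llvm::ConstantRange &R) {
  if (L.icmp(Pred, R))
    return 1;
  if (L.icmp(llvm::CmpInst::getInversePredicate(Pred), R))
    return 0;
  return -1;
}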
 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                BasicBlock *NewSucc) {
   if (PImpl) {
diff --git a/src/llvm-project/llvm/lib/Analysis/LegacyDivergenceAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/LegacyDivergenceAnalysis.cpp
index 30eec5a..031bf3b 100644
--- a/src/llvm-project/llvm/lib/Analysis/LegacyDivergenceAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LegacyDivergenceAnalysis.cpp
@@ -339,7 +339,8 @@
   if (shouldUseGPUDivergenceAnalysis(F, TTI)) {
     // run the new GPU divergence analysis
     auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
-    gpuDA = std::make_unique<GPUDivergenceAnalysis>(F, DT, PDT, LI, TTI);
+    gpuDA = std::make_unique<DivergenceInfo>(F, DT, PDT, LI, TTI,
+                                             /* KnownReducible = */ true);
 
   } else {
     // run LLVM's existing DivergenceAnalysis
@@ -396,8 +397,7 @@
     OS << Arg << "\n";
   }
   // Iterate over the blocks and their instructions in program order to
   // ensure deterministic output.
-  for (auto BI = F->begin(), BE = F->end(); BI != BE; ++BI) {
-    auto &BB = *BI;
+  for (const BasicBlock &BB : *F) {
     OS << "\n           " << BB.getName() << ":\n";
     for (auto &I : BB.instructionsWithoutDebug()) {
       OS << (isDivergent(&I) ? "DIVERGENT:     " : "               ");
diff --git a/src/llvm-project/llvm/lib/Analysis/Lint.cpp b/src/llvm-project/llvm/lib/Analysis/Lint.cpp
index e188c23..4de5e1e 100644
--- a/src/llvm-project/llvm/lib/Analysis/Lint.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/Lint.cpp
@@ -242,7 +242,8 @@
               continue;
             if (AI != BI && (*BI)->getType()->isPointerTy()) {
               AliasResult Result = AA->alias(*AI, *BI);
-              Assert(Result != MustAlias && Result != PartialAlias,
+              Assert(Result != AliasResult::MustAlias &&
+                         Result != AliasResult::PartialAlias,
                      "Unusual: noalias argument aliases another argument", &I);
             }
           }
@@ -302,7 +303,7 @@
         if (Len->getValue().isIntN(32))
           Size = LocationSize::precise(Len->getValue().getZExtValue());
       Assert(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
-                 MustAlias,
+                 AliasResult::MustAlias,
              "Undefined behavior: memcpy source and destination overlap", &I);
       break;
     }
@@ -318,7 +319,8 @@
       // isn't expressive enough for what we really want to do. Known partial
       // overlap is not distinguished from the case where nothing is known.
       const LocationSize LS = LocationSize::precise(Size);
-      Assert(AA->alias(MCII->getSource(), LS, MCII->getDest(), LS) != MustAlias,
+      Assert(AA->alias(MCII->getSource(), LS, MCII->getDest(), LS) !=
+                 AliasResult::MustAlias,
              "Undefined behavior: memcpy source and destination overlap", &I);
       break;
     }
diff --git a/src/llvm-project/llvm/lib/Analysis/Loads.cpp b/src/llvm-project/llvm/lib/Analysis/Loads.cpp
index f9cb244..1c55f48 100644
--- a/src/llvm-project/llvm/lib/Analysis/Loads.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/Loads.cpp
@@ -12,11 +12,14 @@
 
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/Analysis/CaptureTracking.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/GlobalAlias.h"
@@ -25,7 +28,6 @@
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Operator.h"
-#include "llvm/IR/Statepoint.h"
 
 using namespace llvm;
 
@@ -42,7 +44,8 @@
 static bool isDereferenceableAndAlignedPointer(
     const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
     const Instruction *CtxI, const DominatorTree *DT,
-    SmallPtrSetImpl<const Value *> &Visited, unsigned MaxDepth) {
+    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
+    unsigned MaxDepth) {
   assert(V->getType()->isPointerTy() && "Base must be pointer");
 
   // Recursion limit.
@@ -56,17 +59,30 @@
   // Note that it is not safe to speculate into a malloc'd region because
   // malloc may return null.
 
+  // Recurse into both hands of select.
+  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
+    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
+                                              Size, DL, CtxI, DT, TLI, Visited,
+                                              MaxDepth) &&
+           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
+                                              Size, DL, CtxI, DT, TLI, Visited,
+                                              MaxDepth);
+  }
+
   // bitcast instructions are no-ops as far as dereferenceability is concerned.
   if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
     if (BC->getSrcTy()->isPointerTy())
       return isDereferenceableAndAlignedPointer(
-          BC->getOperand(0), Alignment, Size, DL, CtxI, DT, Visited, MaxDepth);
+          BC->getOperand(0), Alignment, Size, DL, CtxI, DT, TLI,
+          Visited, MaxDepth);
   }
 
-  bool CheckForNonNull = false;
+  bool CheckForNonNull, CheckForFreed;
   APInt KnownDerefBytes(Size.getBitWidth(),
-                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
-  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
+                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
+                                                          CheckForFreed));
+  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
+      !CheckForFreed)
     if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
       // As we recursed through GEPs to get here, we've incrementally checked
       // that each step advanced by a multiple of the alignment. If our base is
@@ -77,6 +93,31 @@
       return isAligned(V, Offset, Alignment, DL);
     }
 
+  if (CtxI) {
+    // Look through assumes to see if both dereferenceability and alignment
+    // can be proven by an assume.
+    RetainedKnowledge AlignRK;
+    RetainedKnowledge DerefRK;
+    if (getKnowledgeForValue(
+            V, {Attribute::Dereferenceable, Attribute::Alignment}, nullptr,
+            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
+              if (!isValidAssumeForContext(Assume, CtxI))
+                return false;
+              if (RK.AttrKind == Attribute::Alignment)
+                AlignRK = std::max(AlignRK, RK);
+              if (RK.AttrKind == Attribute::Dereferenceable)
+                DerefRK = std::max(DerefRK, RK);
+              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
+                  DerefRK.ArgValue >= Size.getZExtValue())
+                return true; // We have found what we needed, so stop looking.
+              return false;  // Other assumes may have better information, so
+                             // keep looking.
+            }))
+      return true;
+  }
+  // TODO: Refactor this function to be able to search independently for
+  // dereferenceability and alignment requirements.
+
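// The kind of IR the assume walk above recognizes looks like this
// (hand-written, not from this patch's tests):
//   call void @llvm.assume(i1 true)
//       ["align"(i8* %p, i64 16), "dereferenceable"(i8* %p, i64 64)]
// Each operand bundle surfaces as a RetainedKnowledge (AttrKind plus
// ArgValue), and the callback keeps the maximum seen for alignment and
// dereferenceable bytes until both cover the requested access.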
   // For GEPs, determine if the indexing lands within the allocated object.
   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
     const Value *Base = GEP->getPointerOperand();
@@ -97,22 +138,24 @@
     // addrspacecast, so we can't do arithmetic directly on the APInt values.
     return isDereferenceableAndAlignedPointer(
         Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
-        CtxI, DT, Visited, MaxDepth);
+        CtxI, DT, TLI, Visited, MaxDepth);
   }
 
   // For gc.relocate, look through relocations
   if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
-    return isDereferenceableAndAlignedPointer(
-      RelocateInst->getDerivedPtr(), Alignment, Size, DL, CtxI, DT, Visited, MaxDepth);
+    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
+                                              Alignment, Size, DL, CtxI, DT,
+                                              TLI, Visited, MaxDepth);
 
   if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
     return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
-                                              Size, DL, CtxI, DT, Visited, MaxDepth);
+                                              Size, DL, CtxI, DT, TLI,
+                                              Visited, MaxDepth);
 
   if (const auto *Call = dyn_cast<CallBase>(V)) {
     if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
       return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
-                                                DT, Visited, MaxDepth);
+                                                DT, TLI, Visited, MaxDepth);
 
     // If we have a call we can't recurse through, check to see if this is an
     // allocation function for which we can establish a minimum object size.
@@ -128,19 +171,10 @@
     Opts.RoundToAlign = false;
     Opts.NullIsUnknownSize = true;
     uint64_t ObjSize;
-    // TODO: Plumb through TLI so that malloc routines and such working.
-    if (getObjectSize(V, ObjSize, DL, nullptr, Opts)) {
+    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
       APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
       if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
-          isKnownNonZero(V, DL, 0, nullptr, CtxI, DT) &&
-          // TODO: We're currently inconsistent about whether deref(N) is a
-          // global fact or a point in time fact.  Once D61652 eventually
-          // lands, this check will be restricted to the point in time
-          // variant. For that variant, we need to prove that object hasn't
-          // been conditionally freed before ontext instruction - if it has, we
-          // might be hoisting over the inverse conditional and creating a
-          // dynamic use after free. 
-          !PointerMayBeCapturedBefore(V, true, true, CtxI, DT, true)) {
+          isKnownNonZero(V, DL, 0, nullptr, CtxI, DT) && !V->canBeFreed()) {
         // As we recursed through GEPs to get here, we've incrementally
         // checked that each step advanced by a multiple of the alignment. If
         // our base is properly aligned, then the original offset accessed
@@ -161,7 +195,8 @@
                                               const APInt &Size,
                                               const DataLayout &DL,
                                               const Instruction *CtxI,
-                                              const DominatorTree *DT) {
+                                              const DominatorTree *DT,
+                                              const TargetLibraryInfo *TLI) {
   // Note: At the moment, Size can be zero.  This ends up being interpreted as
   // a query of whether [Base, V] is dereferenceable and V is aligned (since
   // that's what the implementation happened to do).  It's unclear if this is
@@ -169,14 +204,15 @@
 
   SmallPtrSet<const Value *, 32> Visited;
   return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT,
-                                              Visited, 16);
+                                              TLI, Visited, 16);
 }
 
 bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                               MaybeAlign MA,
                                               const DataLayout &DL,
                                               const Instruction *CtxI,
-                                              const DominatorTree *DT) {
+                                              const DominatorTree *DT,
+                                              const TargetLibraryInfo *TLI) {
   // For unsized types or scalable vectors we don't know exactly how many bytes
   // are dereferenced, so bail out.
   if (!Ty->isSized() || isa<ScalableVectorType>(Ty))
@@ -192,14 +228,15 @@
   APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                    DL.getTypeStoreSize(Ty));
   return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
-                                            DT);
+                                            DT, TLI);
 }
 
 bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                     const DataLayout &DL,
                                     const Instruction *CtxI,
-                                    const DominatorTree *DT) {
-  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, DT);
+                                    const DominatorTree *DT,
+                                    const TargetLibraryInfo *TLI) {
+  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, DT, TLI);
 }
 
 /// Test if A and B will obviously have the same value.
@@ -298,10 +335,11 @@
 bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                        const DataLayout &DL,
                                        Instruction *ScanFrom,
-                                       const DominatorTree *DT) {
+                                       const DominatorTree *DT,
+                                       const TargetLibraryInfo *TLI) {
   // If DT is not specified we can't make context-sensitive query
   const Instruction* CtxI = DT ? ScanFrom : nullptr;
-  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT))
+  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT, TLI))
     return true;
 
   if (!ScanFrom)
@@ -372,12 +410,13 @@
 bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                        const DataLayout &DL,
                                        Instruction *ScanFrom,
-                                       const DominatorTree *DT) {
+                                       const DominatorTree *DT,
+                                       const TargetLibraryInfo *TLI) {
   APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
-  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, DT);
+  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, DT, TLI);
 }
 
-  /// DefMaxInstsToScan - the default number of maximum instructions
+/// DefMaxInstsToScan - the default maximum number of instructions
 /// to scan in the block, used by FindAvailableLoadedValue().
 /// FindAvailableLoadedValue() was introduced in r60148, to improve jump
 /// threading in part by eliminating partially redundant loads.
@@ -399,21 +438,24 @@
   if (!Load->isUnordered())
     return nullptr;
 
-  return FindAvailablePtrLoadStore(
-      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
-      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
+  MemoryLocation Loc = MemoryLocation::get(Load);
+  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
+                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
+                                   NumScanedInst);
 }
 
 // Check if the load and the store have the same base, constant offsets and
 // non-overlapping access ranges.
-static bool AreNonOverlapSameBaseLoadAndStore(
-    Value *LoadPtr, Type *LoadTy, Value *StorePtr, Type *StoreTy,
-    const DataLayout &DL) {
+static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
+                                              Type *LoadTy,
+                                              const Value *StorePtr,
+                                              Type *StoreTy,
+                                              const DataLayout &DL) {
   APInt LoadOffset(DL.getTypeSizeInBits(LoadPtr->getType()), 0);
   APInt StoreOffset(DL.getTypeSizeInBits(StorePtr->getType()), 0);
-  Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
+  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
       DL, LoadOffset, /* AllowNonInbounds */ false);
-  Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
+  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
       DL, StoreOffset, /* AllowNonInbounds */ false);
   if (LoadBase != StoreBase)
     return false;
@@ -426,21 +468,23 @@
   return LoadRange.intersectWith(StoreRange).isEmptySet();
 }
 
-static Value *getAvailableLoadStore(Instruction *Inst, Value *Ptr,
+static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                     Type *AccessTy, bool AtLeastAtomic,
                                     const DataLayout &DL, bool *IsLoadCSE) {
   // If this is a load of Ptr, the loaded value is available.
   // (This is true even if the load is volatile or atomic, although
   // those cases are unlikely.)
   if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
-    if (AreEquivalentAddressValues(
-            LI->getPointerOperand()->stripPointerCasts(), Ptr) &&
-        CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
-      // We can value forward from an atomic to a non-atomic, but not the
-      // other way around.
-      if (LI->isAtomic() < AtLeastAtomic)
-        return nullptr;
+    // We can value forward from an atomic to a non-atomic, but not the
+    // other way around.
+    if (LI->isAtomic() < AtLeastAtomic)
+      return nullptr;
 
+    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
+    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
+      return nullptr;
+
+    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
       if (IsLoadCSE)
         *IsLoadCSE = true;
       return LI;
@@ -451,41 +495,44 @@
   // (This is true even if the store is volatile or atomic, although
   // those cases are unlikely.)
   if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
-    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
-    if (AreEquivalentAddressValues(StorePtr, Ptr) &&
-        CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
-                                             AccessTy, DL)) {
-      // We can value forward from an atomic to a non-atomic, but not the
-      // other way around.
-      if (SI->isAtomic() < AtLeastAtomic)
-        return nullptr;
+    // We can value forward from an atomic to a non-atomic, but not the
+    // other way around.
+    if (SI->isAtomic() < AtLeastAtomic)
+      return nullptr;
 
-      if (IsLoadCSE)
-        *IsLoadCSE = false;
-      return SI->getOperand(0);
-    }
+    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
+    if (!AreEquivalentAddressValues(StorePtr, Ptr))
+      return nullptr;
+
+    if (IsLoadCSE)
+      *IsLoadCSE = false;
+
+    Value *Val = SI->getValueOperand();
+    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
+      return Val;
+
+    if (auto *C = dyn_cast<Constant>(Val))
+      return ConstantFoldLoadThroughBitcast(C, AccessTy, DL);
   }
 
   return nullptr;
 }
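// The ConstantFoldLoadThroughBitcast fallback above forwards stored
// constants even when no plain bitcast exists, e.g. (hand-written):
//   store i64 0, i64* %p
//   %q = bitcast i64* %p to i8**
//   %v = load i8*, i8** %q     ; forwarded as i8* null
// An integer-to-pointer bitcast is invalid, so the castable path fails,
// but the constant folder still knows a zero store reads back as null.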
 
-Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
-                                       bool AtLeastAtomic, BasicBlock *ScanBB,
-                                       BasicBlock::iterator &ScanFrom,
-                                       unsigned MaxInstsToScan,
-                                       AAResults *AA, bool *IsLoadCSE,
-                                       unsigned *NumScanedInst) {
+Value *llvm::findAvailablePtrLoadStore(
+    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
+    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
+    AAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
   if (MaxInstsToScan == 0)
     MaxInstsToScan = ~0U;
 
   const DataLayout &DL = ScanBB->getModule()->getDataLayout();
-  Value *StrippedPtr = Ptr->stripPointerCasts();
+  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();
 
   while (ScanFrom != ScanBB->begin()) {
     // We must ignore debug info directives when counting (otherwise they
     // would affect codegen).
     Instruction *Inst = &*--ScanFrom;
-    if (isa<DbgInfoIntrinsic>(Inst))
+    if (Inst->isDebugOrPseudoInst())
       continue;
 
     // Restore ScanFrom to expected value in case next test succeeds
@@ -505,8 +552,6 @@
       return Available;
 
     // Try to get the store size for the type.
-    auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));
-
     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
       Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
 
@@ -523,14 +568,14 @@
         // base, constant offsets and non-overlapping access ranges, ignore the
         // store. This is a simple form of alias analysis that is used by the
         // inliner. FIXME: use BasicAA if possible.
-        if (AreNonOverlapSameBaseLoadAndStore(
-                Ptr, AccessTy, SI->getPointerOperand(),
+        if (areNonOverlapSameBaseLoadAndStore(
+                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                 SI->getValueOperand()->getType(), DL))
           continue;
       } else {
         // If we have alias analysis and it says the store won't modify the
         // loaded value, ignore the store.
-        if (!isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
+        if (!isModSet(AA->getModRefInfo(SI, Loc)))
           continue;
       }
 
@@ -543,7 +588,7 @@
     if (Inst->mayWriteToMemory()) {
       // If alias analysis claims that it really won't modify the load,
       // ignore it.
-      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
+      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
         continue;
 
       // May modify the pointer, bail out.
@@ -575,7 +620,7 @@
   SmallVector<Instruction *> MustNotAliasInsts;
   for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                       ScanBB->rend())) {
-    if (isa<DbgInfoIntrinsic>(&Inst))
+    if (Inst.isDebugOrPseudoInst())
       continue;
 
     if (MaxInstsToScan-- == 0)
@@ -593,9 +638,9 @@
   // If we found an available value, ensure that the instructions in between
   // did not modify the memory location.
   if (Available) {
-    auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));
+    MemoryLocation Loc = MemoryLocation::get(Load);
     for (Instruction *Inst : MustNotAliasInsts)
-      if (isModSet(AA.getModRefInfo(Inst, StrippedPtr, AccessSize)))
+      if (isModSet(AA.getModRefInfo(Inst, Loc)))
         return nullptr;
   }
 
diff --git a/src/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index e632fe2..a239928 100644
--- a/src/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -170,8 +170,10 @@
 
 RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
     unsigned Index, RuntimePointerChecking &RtCheck)
-    : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
-      Low(RtCheck.Pointers[Index].Start) {
+    : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
+      AddressSpace(RtCheck.Pointers[Index]
+                       .PointerValue->getType()
+                       ->getPointerAddressSpace()) {
   Members.push_back(Index);
 }
 
@@ -199,9 +201,9 @@
   const SCEV *ScStart;
   const SCEV *ScEnd;
 
-  if (SE->isLoopInvariant(Sc, Lp))
+  if (SE->isLoopInvariant(Sc, Lp)) {
     ScStart = ScEnd = Sc;
-  else {
+  } else {
     const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
     assert(AR && "Invalid addrec expression");
     const SCEV *Ex = PSE.getBackedgeTakenCount();
@@ -222,13 +224,13 @@
       ScStart = SE->getUMinExpr(ScStart, ScEnd);
       ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
     }
-    // Add the size of the pointed element to ScEnd.
-    auto &DL = Lp->getHeader()->getModule()->getDataLayout();
-    Type *IdxTy = DL.getIndexType(Ptr->getType());
-    const SCEV *EltSizeSCEV =
-        SE->getStoreSizeOfExpr(IdxTy, Ptr->getType()->getPointerElementType());
-    ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
   }
+  // Add the size of the pointed element to ScEnd.
+  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
+  Type *IdxTy = DL.getIndexType(Ptr->getType());
+  const SCEV *EltSizeSCEV =
+      SE->getStoreSizeOfExpr(IdxTy, Ptr->getType()->getPointerElementType());
+  ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
 
   Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
 }
@@ -279,18 +281,28 @@
   return I;
 }
 
-bool RuntimeCheckingPtrGroup::addPointer(unsigned Index) {
-  const SCEV *Start = RtCheck.Pointers[Index].Start;
-  const SCEV *End = RtCheck.Pointers[Index].End;
+bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
+                                         RuntimePointerChecking &RtCheck) {
+  return addPointer(
+      Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
+      RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
+      *RtCheck.SE);
+}
+
+bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
+                                         const SCEV *End, unsigned AS,
+                                         ScalarEvolution &SE) {
+  assert(AddressSpace == AS &&
+         "all pointers in a checking group must be in the same address space");
 
   // Compare the starts and ends with the known minimum and maximum
   // of this set. We need to know how we compare against the min/max
   // of the set in order to be able to emit memchecks.
-  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
+  const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
   if (!Min0)
     return false;
 
-  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
+  const SCEV *Min1 = getMinFromExprs(End, High, &SE);
   if (!Min1)
     return false;
 
@@ -410,7 +422,7 @@
 
         TotalComparisons++;
 
-        if (Group.addPointer(Pointer)) {
+        if (Group.addPointer(Pointer, *this)) {
           Merged = true;
           break;
         }
@@ -1124,139 +1136,130 @@
   return Stride;
 }
 
-bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
-                           ScalarEvolution &SE,
+Optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB,
+                                    Value *PtrB, const DataLayout &DL,
+                                    ScalarEvolution &SE, bool StrictCheck,
+                                    bool CheckType) {
+  assert(PtrA && PtrB && "Expected non-nullptr pointers.");
+  assert(cast<PointerType>(PtrA->getType())
+             ->isOpaqueOrPointeeTypeMatches(ElemTyA) && "Wrong PtrA type");
+  assert(cast<PointerType>(PtrB->getType())
+             ->isOpaqueOrPointeeTypeMatches(ElemTyB) && "Wrong PtrB type");
+
+  // Identical pointers are trivially at distance 0.
+  if (PtrA == PtrB)
+    return 0;
+
+  // Make sure that the element types are the same if required.
+  if (CheckType && ElemTyA != ElemTyB)
+    return None;
+
+  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
+  unsigned ASB = PtrB->getType()->getPointerAddressSpace();
+
+  // Check that the address spaces match.
+  if (ASA != ASB)
+    return None;
+  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
+
+  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
+  Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
+  Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
+
+  int Val;
+  if (PtrA1 == PtrB1) {
+    // Retrieve the address space again as pointer stripping now tracks through
+    // `addrspacecast`.
+    ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
+    ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
+    // Check that the address spaces match and that the pointers are valid.
+    if (ASA != ASB)
+      return None;
+
+    IdxWidth = DL.getIndexSizeInBits(ASA);
+    OffsetA = OffsetA.sextOrTrunc(IdxWidth);
+    OffsetB = OffsetB.sextOrTrunc(IdxWidth);
+
+    OffsetB -= OffsetA;
+    Val = OffsetB.getSExtValue();
+  } else {
+    // Otherwise compute the distance with SCEV between the base pointers.
+    const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
+    const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
+    const auto *Diff =
+        dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
+    if (!Diff)
+      return None;
+    Val = Diff->getAPInt().getSExtValue();
+  }
+  int Size = DL.getTypeStoreSize(ElemTyA);
+  int Dist = Val / Size;
+
+  // Ensure that the byte distance computed above corresponds to a whole
+  // number of elements of the given type, now that any bitcasts on the
+  // provided pointers have been stripped.
+  if (!StrictCheck || Dist * Size == Val)
+    return Dist;
+  return None;
+}
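// A hand-worked instance (illustrative, not from this patch's tests):
//   %a = getelementptr inbounds i32, i32* %base, i64 1
//   %b = getelementptr inbounds i32, i32* %base, i64 3
// Stripping the in-bounds offsets leaves the common base %base with byte
// offsets 4 and 12, so Val = 8. With Size = 4 for i32 this yields
// Dist = 2, and the strict check passes because 2 * 4 == 8.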
+
+bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, Type *ElemTy,
+                           const DataLayout &DL, ScalarEvolution &SE,
                            SmallVectorImpl<unsigned> &SortedIndices) {
   assert(llvm::all_of(
              VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
          "Expected list of pointer operands.");
-  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
-  OffValPairs.reserve(VL.size());
-
   // Walk over the pointers, and map each of them to an offset relative to
   // the first pointer in the array.
   Value *Ptr0 = VL[0];
-  const SCEV *Scev0 = SE.getSCEV(Ptr0);
-  Value *Obj0 = getUnderlyingObject(Ptr0);
 
-  llvm::SmallSet<int64_t, 4> Offsets;
-  for (auto *Ptr : VL) {
-    // TODO: Outline this code as a special, more time consuming, version of
-    // computeConstantDifference() function.
-    if (Ptr->getType()->getPointerAddressSpace() !=
-        Ptr0->getType()->getPointerAddressSpace())
-      return false;
-    // If a pointer refers to a different underlying object, bail - the
-    // pointers are by definition incomparable.
-    Value *CurrObj = getUnderlyingObject(Ptr);
-    if (CurrObj != Obj0)
-      return false;
-
-    const SCEV *Scev = SE.getSCEV(Ptr);
-    const auto *Diff = dyn_cast<SCEVConstant>(SE.getMinusSCEV(Scev, Scev0));
-    // The pointers may not have a constant offset from each other, or SCEV
-    // may just not be smart enough to figure out they do. Regardless,
-    // there's nothing we can do.
+  using DistOrdPair = std::pair<int64_t, int>;
+  auto Compare = [](const DistOrdPair &L, const DistOrdPair &R) {
+    return L.first < R.first;
+  };
+  std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
+  Offsets.emplace(0, 0);
+  int Cnt = 1;
+  bool IsConsecutive = true;
+  for (auto *Ptr : VL.drop_front()) {
+    Optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
+                                         /*StrictCheck=*/true);
     if (!Diff)
       return false;
 
     // Check if the pointer with the same offset is found.
-    int64_t Offset = Diff->getAPInt().getSExtValue();
-    if (!Offsets.insert(Offset).second)
+    int64_t Offset = *Diff;
+    auto Res = Offsets.emplace(Offset, Cnt);
+    if (!Res.second)
       return false;
-    OffValPairs.emplace_back(Offset, Ptr);
+    // The order stays consecutive only if each new element is inserted at
+    // the end of the ordered set.
+    IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
+    ++Cnt;
   }
   SortedIndices.clear();
-  SortedIndices.resize(VL.size());
-  std::iota(SortedIndices.begin(), SortedIndices.end(), 0);
-
-  // Sort the memory accesses and keep the order of their uses in UseOrder.
-  llvm::stable_sort(SortedIndices, [&](unsigned Left, unsigned Right) {
-    return OffValPairs[Left].first < OffValPairs[Right].first;
-  });
-
-  // Check if the order is consecutive already.
-  if (llvm::all_of(SortedIndices, [&SortedIndices](const unsigned I) {
-        return I == SortedIndices[I];
-      }))
-    SortedIndices.clear();
-
+  if (!IsConsecutive) {
+    // Fill SortedIndices only if the order is not already consecutive.
+    SortedIndices.resize(VL.size());
+    Cnt = 0;
+    for (const std::pair<int64_t, int> &Pair : Offsets) {
+      SortedIndices[Cnt] = Pair.second;
+      ++Cnt;
+    }
+  }
   return true;
 }
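// A hand-worked instance of the rewritten sort (illustrative): for
// pointers at element distances {0, 2, 1} from VL[0], the set receives
// (0, 0), then (2, 1) at its end, then (1, 2) in the middle. That last
// insertion clears IsConsecutive, and walking the ordered set fills
// SortedIndices with {0, 2, 1}.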
 
-/// Take the address space operand from the Load/Store instruction.
-/// Returns -1 if this is not a valid Load/Store instruction.
-static unsigned getAddressSpaceOperand(Value *I) {
-  if (LoadInst *L = dyn_cast<LoadInst>(I))
-    return L->getPointerAddressSpace();
-  if (StoreInst *S = dyn_cast<StoreInst>(I))
-    return S->getPointerAddressSpace();
-  return -1;
-}
-
 /// Returns true if the memory operations \p A and \p B are consecutive.
 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                                ScalarEvolution &SE, bool CheckType) {
   Value *PtrA = getLoadStorePointerOperand(A);
   Value *PtrB = getLoadStorePointerOperand(B);
-  unsigned ASA = getAddressSpaceOperand(A);
-  unsigned ASB = getAddressSpaceOperand(B);
-
-  // Check that the address spaces match and that the pointers are valid.
-  if (!PtrA || !PtrB || (ASA != ASB))
+  if (!PtrA || !PtrB)
     return false;
-
-  // Make sure that A and B are different pointers.
-  if (PtrA == PtrB)
-    return false;
-
-  // Make sure that A and B have the same type if required.
-  if (CheckType && PtrA->getType() != PtrB->getType())
-    return false;
-
-  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
-  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
-
-  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
-  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
-  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
-
-  // Retrieve the address space again as pointer stripping now tracks through
-  // `addrspacecast`.
-  ASA = cast<PointerType>(PtrA->getType())->getAddressSpace();
-  ASB = cast<PointerType>(PtrB->getType())->getAddressSpace();
-  // Check that the address spaces match and that the pointers are valid.
-  if (ASA != ASB)
-    return false;
-
-  IdxWidth = DL.getIndexSizeInBits(ASA);
-  OffsetA = OffsetA.sextOrTrunc(IdxWidth);
-  OffsetB = OffsetB.sextOrTrunc(IdxWidth);
-
-  APInt Size(IdxWidth, DL.getTypeStoreSize(Ty));
-
-  //  OffsetDelta = OffsetB - OffsetA;
-  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
-  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
-  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
-  const APInt &OffsetDelta = cast<SCEVConstant>(OffsetDeltaSCEV)->getAPInt();
-
-  // Check if they are based on the same pointer. That makes the offsets
-  // sufficient.
-  if (PtrA == PtrB)
-    return OffsetDelta == Size;
-
-  // Compute the necessary base pointer delta to have the necessary final delta
-  // equal to the size.
-  // BaseDelta = Size - OffsetDelta;
-  const SCEV *SizeSCEV = SE.getConstant(Size);
-  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);
-
-  // Otherwise compute the distance with SCEV between the base pointers.
-  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
-  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
-  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
-  return X == PtrSCEVB;
+  Type *ElemTyA = getLoadStoreType(A);
+  Type *ElemTyB = getLoadStoreType(B);
+  Optional<int> Diff = getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
+                                       /*StrictCheck=*/true, CheckType);
+  return Diff && *Diff == 1;
 }
 
 MemoryDepChecker::VectorizationSafetyStatus
@@ -1523,7 +1526,8 @@
   uint64_t Stride = std::abs(StrideAPtr);
   const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
   if (!C) {
-    if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
+    if (!isa<SCEVCouldNotCompute>(Dist) &&
+        TypeByteSize == DL.getTypeAllocSize(BTy) &&
         isSafeDependenceDistance(DL, *(PSE.getSE()),
                                  *(PSE.getBackedgeTakenCount()), *Dist, Stride,
                                  TypeByteSize))
@@ -2276,12 +2280,12 @@
 }
 
 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
-    AU.addRequired<ScalarEvolutionWrapperPass>();
-    AU.addRequired<AAResultsWrapperPass>();
-    AU.addRequired<DominatorTreeWrapperPass>();
-    AU.addRequired<LoopInfoWrapperPass>();
+  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
+  AU.addRequiredTransitive<AAResultsWrapperPass>();
+  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
+  AU.addRequiredTransitive<LoopInfoWrapperPass>();
 
-    AU.setPreservesAll();
+  AU.setPreservesAll();
 }
 
 char LoopAccessLegacyAnalysis::ID = 0;
diff --git a/src/llvm-project/llvm/lib/Analysis/LoopAnalysisManager.cpp b/src/llvm-project/llvm/lib/Analysis/LoopAnalysisManager.cpp
index 4ad5641..4d6f8a6 100644
--- a/src/llvm-project/llvm/lib/Analysis/LoopAnalysisManager.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LoopAnalysisManager.cpp
@@ -143,11 +143,5 @@
   PA.preserve<LoopAnalysis>();
   PA.preserve<LoopAnalysisManagerFunctionProxy>();
   PA.preserve<ScalarEvolutionAnalysis>();
-  // FIXME: What we really want to do here is preserve an AA category, but that
-  // concept doesn't exist yet.
-  PA.preserve<AAManager>();
-  PA.preserve<BasicAA>();
-  PA.preserve<GlobalsAA>();
-  PA.preserve<SCEVAA>();
   return PA;
 }
diff --git a/src/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp
index cf68596..8a61364 100644
--- a/src/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LoopCacheAnalysis.cpp
@@ -109,9 +109,7 @@
   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount) ||
       !isa<SCEVConstant>(BackedgeTakenCount))
     return nullptr;
-
-  return SE.getAddExpr(BackedgeTakenCount,
-                       SE.getOne(BackedgeTakenCount->getType()));
+  return SE.getTripCountFromExitCount(BackedgeTakenCount);
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/src/llvm-project/llvm/lib/Analysis/LoopInfo.cpp b/src/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
index a85869b..66aab4c 100644
--- a/src/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LoopInfo.cpp
@@ -20,6 +20,7 @@
 #include "llvm/Analysis/IVDescriptors.h"
 #include "llvm/Analysis/LoopInfoImpl.h"
 #include "llvm/Analysis/LoopIterator.h"
+#include "llvm/Analysis/LoopNestAnalysis.h"
 #include "llvm/Analysis/MemorySSA.h"
 #include "llvm/Analysis/MemorySSAUpdater.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
@@ -170,8 +171,8 @@
 }
 
 /// Get the latch condition instruction.
-static ICmpInst *getLatchCmpInst(const Loop &L) {
-  if (BasicBlock *Latch = L.getLoopLatch())
+ICmpInst *Loop::getLatchCmpInst() const {
+  if (BasicBlock *Latch = getLoopLatch())
     if (BranchInst *BI = dyn_cast_or_null<BranchInst>(Latch->getTerminator()))
       if (BI->isConditional())
         return dyn_cast<ICmpInst>(BI->getCondition());
@@ -182,7 +183,7 @@
 /// Return the final value of the loop induction variable if found.
 static Value *findFinalIVValue(const Loop &L, const PHINode &IndVar,
                                const Instruction &StepInst) {
-  ICmpInst *LatchCmpInst = getLatchCmpInst(L);
+  ICmpInst *LatchCmpInst = L.getLatchCmpInst();
   if (!LatchCmpInst)
     return nullptr;
 
@@ -296,7 +297,7 @@
 
   BasicBlock *Header = getHeader();
   assert(Header && "Expected a valid loop header");
-  ICmpInst *CmpInst = getLatchCmpInst(*this);
+  ICmpInst *CmpInst = getLatchCmpInst();
   if (!CmpInst)
     return nullptr;
 
@@ -380,10 +381,6 @@
   if (!ExitFromLatch)
     return nullptr;
 
-  BasicBlock *ExitFromLatchSucc = ExitFromLatch->getUniqueSuccessor();
-  if (!ExitFromLatchSucc)
-    return nullptr;
-
   BasicBlock *GuardBB = Preheader->getUniquePredecessor();
   if (!GuardBB)
     return nullptr;
@@ -397,7 +394,17 @@
   BasicBlock *GuardOtherSucc = (GuardBI->getSuccessor(0) == Preheader)
                                    ? GuardBI->getSuccessor(1)
                                    : GuardBI->getSuccessor(0);
-  return (GuardOtherSucc == ExitFromLatchSucc) ? GuardBI : nullptr;
+
+  // Check whether ExitFromLatch, or an empty block reached from it through
+  // a chain of unique successors, is equal to GuardOtherSucc. If
+  // skipEmptyBlockUntil reaches GuardOtherSucc, then GuardBI is the loop's
+  // guard branch; otherwise the loop has no guard and we return nullptr.
+  if (&LoopNest::skipEmptyBlockUntil(ExitFromLatch, GuardOtherSucc,
+                                     /*CheckUniquePred=*/true) ==
+      GuardOtherSucc)
+    return GuardBI;
+  else
+    return nullptr;
 }
 
 bool Loop::isCanonical(ScalarEvolution &SE) const {
@@ -616,15 +623,7 @@
       if (!LoopIdMD)
         return false;
 
-      bool LoopIdMDFound = false;
-      for (const MDOperand &MDOp : LoopIdMD->operands()) {
-        if (MDOp == DesiredLoopIdMetadata) {
-          LoopIdMDFound = true;
-          break;
-        }
-      }
-
-      if (!LoopIdMDFound)
+      if (!llvm::is_contained(LoopIdMD->operands(), DesiredLoopIdMetadata))
         return false;
     }
   }
@@ -670,7 +669,7 @@
 LLVM_DUMP_METHOD void Loop::dump() const { print(dbgs()); }
 
 LLVM_DUMP_METHOD void Loop::dumpVerbose() const {
-  print(dbgs(), /*Depth=*/0, /*Verbose=*/true);
+  print(dbgs(), /*Verbose=*/true);
 }
 #endif
 
@@ -765,9 +764,8 @@
 void UnloopUpdater::removeBlocksFromAncestors() {
   // Remove all unloop's blocks (including those in nested subloops) from
   // ancestors below the new parent loop.
-  for (Loop::block_iterator BI = Unloop.block_begin(), BE = Unloop.block_end();
-       BI != BE; ++BI) {
-    Loop *OuterParent = LI->getLoopFor(*BI);
+  for (BasicBlock *BB : Unloop.blocks()) {
+    Loop *OuterParent = LI->getLoopFor(BB);
     if (Unloop.contains(OuterParent)) {
       while (OuterParent->getParentLoop() != &Unloop)
         OuterParent = OuterParent->getParentLoop();
@@ -778,7 +776,7 @@
     for (Loop *OldParent = Unloop.getParentLoop(); OldParent != OuterParent;
          OldParent = OldParent->getParentLoop()) {
       assert(OldParent && "new loop is not an ancestor of the original");
-      OldParent->removeBlockFromLoop(*BI);
+      OldParent->removeBlockFromLoop(BB);
     }
   }
 }
@@ -885,17 +883,14 @@
   // First handle the special case of no parent loop to simplify the algorithm.
   if (Unloop->isOutermost()) {
     // Since BBLoop had no parent, Unloop blocks are no longer in a loop.
-    for (Loop::block_iterator I = Unloop->block_begin(),
-                              E = Unloop->block_end();
-         I != E; ++I) {
-
+    for (BasicBlock *BB : Unloop->blocks()) {
       // Don't reparent blocks in subloops.
-      if (getLoopFor(*I) != Unloop)
+      if (getLoopFor(BB) != Unloop)
         continue;
 
       // Blocks no longer have a parent but are still referenced by Unloop until
       // the Unloop object is deleted.
-      changeLoopFor(*I, nullptr);
+      changeLoopFor(BB, nullptr);
     }
 
     // Remove the loop from the top-level LoopInfo object.
@@ -936,6 +931,31 @@
   }
 }
 
+bool
+LoopInfo::wouldBeOutOfLoopUseRequiringLCSSA(const Value *V,
+                                            const BasicBlock *ExitBB) const {
+  if (V->getType()->isTokenTy())
+    // We can't form PHIs of token type, so the definition of LCSSA excludes
+    // values of that type.
+    return false;
+
+  const Instruction *I = dyn_cast<Instruction>(V);
+  if (!I)
+    return false;
+  const Loop *L = getLoopFor(I->getParent());
+  if (!L)
+    return false;
+  if (L->contains(ExitBB))
+    // Could be an exit bb of a subloop and contained in defining loop
+    return false;
+
+  // We found a (new) out-of-loop use location, for a value defined in-loop.
+  // (Note that because of LCSSA, we don't have to account for values defined
+  // in sibling loops.  Such values will have LCSSA phis of their own in the
+  // common parent loop.)
+  return true;
+}
+
 AnalysisKey LoopAnalysis::Key;
 
 LoopInfo LoopAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
@@ -1026,6 +1046,72 @@
   return findOptionMDForLoopID(TheLoop->getLoopID(), Name);
 }
 
+/// Find string metadata for loop
+///
+/// If it has a value (e.g. {"llvm.distribute", 1}) return the value as an
+/// operand or null otherwise.  If the string metadata is not found return
+/// Optional's not-a-value.
+Optional<const MDOperand *> llvm::findStringMetadataForLoop(const Loop *TheLoop,
+                                                            StringRef Name) {
+  MDNode *MD = findOptionMDForLoop(TheLoop, Name);
+  if (!MD)
+    return None;
+  switch (MD->getNumOperands()) {
+  case 1:
+    return nullptr;
+  case 2:
+    return &MD->getOperand(1);
+  default:
+    llvm_unreachable("loop metadata has 0 or 1 operand");
+  }
+}
+
+Optional<bool> llvm::getOptionalBoolLoopAttribute(const Loop *TheLoop,
+                                                  StringRef Name) {
+  MDNode *MD = findOptionMDForLoop(TheLoop, Name);
+  if (!MD)
+    return None;
+  switch (MD->getNumOperands()) {
+  case 1:
+    // When the value is absent it is interpreted as 'attribute set'.
+    return true;
+  case 2:
+    if (ConstantInt *IntMD =
+            mdconst::extract_or_null<ConstantInt>(MD->getOperand(1).get()))
+      return IntMD->getZExtValue();
+    return true;
+  }
+  llvm_unreachable("unexpected number of options");
+}
+
+bool llvm::getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name) {
+  return getOptionalBoolLoopAttribute(TheLoop, Name).getValueOr(false);
+}
+
+llvm::Optional<int> llvm::getOptionalIntLoopAttribute(const Loop *TheLoop,
+                                                      StringRef Name) {
+  const MDOperand *AttrMD =
+      findStringMetadataForLoop(TheLoop, Name).getValueOr(nullptr);
+  if (!AttrMD)
+    return None;
+
+  ConstantInt *IntMD = mdconst::extract_or_null<ConstantInt>(AttrMD->get());
+  if (!IntMD)
+    return None;
+
+  return IntMD->getSExtValue();
+}
+
+static const char *LLVMLoopMustProgress = "llvm.loop.mustprogress";
+
+bool llvm::hasMustProgress(const Loop *L) {
+  return getBooleanLoopAttribute(L, LLVMLoopMustProgress);
+}
+
+bool llvm::isMustProgress(const Loop *L) {
+  return L->getHeader()->getParent()->mustProgress() || hasMustProgress(L);
+}
+
 bool llvm::isValidAsAccessGroup(MDNode *Node) {
   return Node->getNumOperands() == 0 && Node->isDistinct();
 }
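
Illustration only: a sketch of querying the loop-attribute helpers relocated above. The metadata shape in the comment and the function name inspectLoop are examples for this note, not from the patch.

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/LoopInfo.h"

// Assumes L's loop ID metadata looks like:
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.mustprogress"}          ; value absent => "set"
//   !2 = !{!"llvm.loop.unroll.count", i32 4}   ; integer-valued option
static void inspectLoop(const llvm::Loop *L) {
  bool MustProgress = llvm::hasMustProgress(L); // true for !1 above
  llvm::Optional<int> Count =
      llvm::getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
  (void)MustProgress;
  (void)Count; // None when the option is missing or has no integer payload
}
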
diff --git a/src/llvm-project/llvm/lib/Analysis/LoopNestAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/LoopNestAnalysis.cpp
index 7133abc..2649ed6 100644
--- a/src/llvm-project/llvm/lib/Analysis/LoopNestAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LoopNestAnalysis.cpp
@@ -206,11 +206,12 @@
 }
 
 const BasicBlock &LoopNest::skipEmptyBlockUntil(const BasicBlock *From,
-                                                const BasicBlock *End) {
+                                                const BasicBlock *End,
+                                                bool CheckUniquePred) {
   assert(From && "Expecting valid From");
   assert(End && "Expecting valid End");
 
-  if (From == End || !From->getSingleSuccessor())
+  if (From == End || !From->getUniqueSuccessor())
     return *From;
 
   auto IsEmpty = [](const BasicBlock *BB) {
@@ -219,12 +220,13 @@
 
   // Visited is used to avoid running into an infinite loop.
   SmallPtrSet<const BasicBlock *, 4> Visited;
-  const BasicBlock *BB = From->getSingleSuccessor();
-  const BasicBlock *PredBB = BB;
-  while (BB && BB != End && IsEmpty(BB) && !Visited.count(BB)) {
+  const BasicBlock *BB = From->getUniqueSuccessor();
+  const BasicBlock *PredBB = From;
+  while (BB && BB != End && IsEmpty(BB) && !Visited.count(BB) &&
+         (!CheckUniquePred || BB->getUniquePredecessor())) {
     Visited.insert(BB);
     PredBB = BB;
-    BB = BB->getSingleSuccessor();
+    BB = BB->getUniqueSuccessor();
   }
 
   return (BB == End) ? *End : *PredBB;
@@ -335,9 +337,11 @@
 
   // Ensure the inner loop exit block leads to the outer loop latch possibly
   // through empty blocks.
-  const BasicBlock &SuccInner =
-      LoopNest::skipEmptyBlockUntil(InnerLoop.getExitBlock(), OuterLoopLatch);
-  if (&SuccInner != OuterLoopLatch && &SuccInner != ExtraPhiBlock) {
+  if ((!ExtraPhiBlock ||
+       &LoopNest::skipEmptyBlockUntil(InnerLoop.getExitBlock(),
+                                      ExtraPhiBlock) != ExtraPhiBlock) &&
+      (&LoopNest::skipEmptyBlockUntil(InnerLoop.getExitBlock(),
+                                      OuterLoopLatch) != OuterLoopLatch)) {
     DEBUG_WITH_TYPE(
         VerboseDebug,
         dbgs() << "Inner loop exit block " << *InnerLoopExit
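
A condensed restatement of the walk patched above (illustration only; the real helper's IsEmpty lambda and End-vs-early-stop handling differ slightly). "Empty" is simplified here to "only instruction is the terminator"; the function name is invented.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/BasicBlock.h"

static const llvm::BasicBlock *
skipEmptySuccessors(const llvm::BasicBlock *From, const llvm::BasicBlock *End,
                    bool CheckUniquePred) {
  llvm::SmallPtrSet<const llvm::BasicBlock *, 4> Visited;
  const llvm::BasicBlock *BB = From->getUniqueSuccessor();
  const llvm::BasicBlock *Pred = From;
  // getUniqueSuccessor(), unlike getSingleSuccessor(), also accepts a
  // terminator with several edges that all target the same block.
  while (BB && BB != End && BB->size() == 1 && Visited.insert(BB).second &&
         (!CheckUniquePred || BB->getUniquePredecessor())) {
    Pred = BB;
    BB = BB->getUniqueSuccessor();
  }
  return BB == End ? End : Pred;
}
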
diff --git a/src/llvm-project/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp b/src/llvm-project/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp
index b04cc46..15095d6 100644
--- a/src/llvm-project/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/LoopUnrollAnalyzer.cpp
@@ -35,6 +35,11 @@
     return true;
   }
 
+  // If we have a loop invariant computation, we only need to compute it once.
+  // Given that, all but the first occurrence are free.
+  if (!IterationNumber->isZero() && SE.isLoopInvariant(S, L))
+    return true;
+
   auto *AR = dyn_cast<SCEVAddRecExpr>(S);
   if (!AR || AR->getLoop() != L)
     return false;
@@ -69,10 +74,10 @@
 bool UnrolledInstAnalyzer::visitBinaryOperator(BinaryOperator &I) {
   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
   if (!isa<Constant>(LHS))
-    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
+    if (Value *SimpleLHS = SimplifiedValues.lookup(LHS))
       LHS = SimpleLHS;
   if (!isa<Constant>(RHS))
-    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
+    if (Value *SimpleRHS = SimplifiedValues.lookup(RHS))
       RHS = SimpleRHS;
 
   Value *SimpleV = nullptr;
@@ -83,11 +88,10 @@
   else
     SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
 
-  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
-    SimplifiedValues[&I] = C;
-
-  if (SimpleV)
+  if (SimpleV) {
+    SimplifiedValues[&I] = SimpleV;
     return true;
+  }
   return Base::visitBinaryOperator(I);
 }
 
@@ -142,20 +146,17 @@
 
 /// Try to simplify cast instruction.
 bool UnrolledInstAnalyzer::visitCastInst(CastInst &I) {
-  // Propagate constants through casts.
-  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
-  if (!COp)
-    COp = SimplifiedValues.lookup(I.getOperand(0));
+  Value *Op = I.getOperand(0);
+  if (Value *Simplified = SimplifiedValues.lookup(Op))
+    Op = Simplified;
 
-  // If we know a simplified value for this operand and cast is valid, save the
-  // result to SimplifiedValues.
   // The cast can be invalid, because SimplifiedValues contains results of SCEV
   // analysis, which operates on integers (and, e.g., might convert i8* null to
   // i32 0).
-  if (COp && CastInst::castIsValid(I.getOpcode(), COp, I.getType())) {
-    if (Constant *C =
-            ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
-      SimplifiedValues[&I] = C;
+  if (CastInst::castIsValid(I.getOpcode(), Op, I.getType())) {
+    const DataLayout &DL = I.getModule()->getDataLayout();
+    if (Value *V = SimplifyCastInst(I.getOpcode(), Op, I.getType(), DL)) {
+      SimplifiedValues[&I] = V;
       return true;
     }
   }
@@ -169,10 +170,10 @@
 
   // First try to handle simplified comparisons.
   if (!isa<Constant>(LHS))
-    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
+    if (Value *SimpleLHS = SimplifiedValues.lookup(LHS))
       LHS = SimpleLHS;
   if (!isa<Constant>(RHS))
-    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
+    if (Value *SimpleRHS = SimplifiedValues.lookup(RHS))
       RHS = SimpleRHS;
 
   if (!isa<Constant>(LHS) && !isa<Constant>(RHS)) {
@@ -190,15 +191,10 @@
     }
   }
 
-  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
-    if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
-      if (CLHS->getType() == CRHS->getType()) {
-        if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
-          SimplifiedValues[&I] = C;
-          return true;
-        }
-      }
-    }
+  const DataLayout &DL = I.getModule()->getDataLayout();
+  if (Value *V = SimplifyCmpInst(I.getPredicate(), LHS, RHS, DL)) {
+    SimplifiedValues[&I] = V;
+    return true;
   }
 
   return Base::visitCmpInst(I);
@@ -213,3 +209,7 @@
   // The loop induction PHI nodes are definitionally free.
   return PN.getParent() == L->getHeader();
 }
+
+bool UnrolledInstAnalyzer::visitInstruction(Instruction &I) {
+  return simplifyInstWithSCEV(&I);
+}
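
A condensed illustration of the pattern this file now applies uniformly: substitute operands from the cache of simplified values (which after this change may hold arbitrary Values, not only Constants) and cache whatever InstSimplify returns. The function and cache names are hypothetical.

#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instructions.h"

static bool trySimplifyBinOp(llvm::BinaryOperator &I,
                             const llvm::DataLayout &DL,
                             llvm::DenseMap<llvm::Value *, llvm::Value *> &Cache) {
  llvm::Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (llvm::Value *S = Cache.lookup(LHS))
    LHS = S;
  if (llvm::Value *S = Cache.lookup(RHS))
    RHS = S;
  if (llvm::Value *V = llvm::SimplifyBinOp(I.getOpcode(), LHS, RHS, DL)) {
    Cache[&I] = V; // may be any Value, not only a Constant
    return true;
  }
  return false;
}
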
diff --git a/src/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp b/src/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp
index 89f4ff4..5b95ed2 100644
--- a/src/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -43,11 +43,19 @@
              "blocking any further inlining."),
     cl::init(2.0));
 
+// clang-format off
 const std::array<std::string, NumberOfFeatures> llvm::FeatureNameMap{
+// InlineCost features - these must come first
+#define POPULATE_NAMES(INDEX_NAME, NAME) NAME,
+  INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
+#undef POPULATE_NAMES
+
+// Non-cost features
 #define POPULATE_NAMES(INDEX_NAME, NAME, COMMENT) NAME,
-    INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
+  INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 };
+// clang-format on
 
 const char *const llvm::DecisionName = "inlining_decision";
 const char *const llvm::DefaultDecisionName = "inlining_default";
@@ -134,7 +142,11 @@
   Function *Callee = Advice.getCallee();
 
   // The caller features aren't valid anymore.
-  FAM.invalidate<FunctionPropertiesAnalysis>(*Caller);
+  {
+    PreservedAnalyses PA = PreservedAnalyses::all();
+    PA.abandon<FunctionPropertiesAnalysis>();
+    FAM.invalidate(*Caller, PA);
+  }
   int64_t IRSizeAfter =
       getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
   CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
@@ -213,6 +225,12 @@
     CostEstimate = *IsCallSiteInlinable;
   }
 
+  const auto CostFeatures =
+      llvm::getInliningCostFeatures(CB, TIR, GetAssumptionCache);
+  if (!CostFeatures) {
+    return std::make_unique<InlineAdvice>(this, CB, ORE, false);
+  }
+
   if (Mandatory)
     return getMandatoryAdvice(CB, true);
 
@@ -230,7 +248,6 @@
                           FunctionLevels[&Caller]);
   ModelRunner->setFeature(FeatureIndex::NodeCount, NodeCount);
   ModelRunner->setFeature(FeatureIndex::NrCtantParams, NrCtantParams);
-  ModelRunner->setFeature(FeatureIndex::CostEstimate, CostEstimate);
   ModelRunner->setFeature(FeatureIndex::EdgeCount, EdgeCount);
   ModelRunner->setFeature(FeatureIndex::CallerUsers, CallerBefore.Uses);
   ModelRunner->setFeature(FeatureIndex::CallerConditionallyExecutedBlocks,
@@ -240,6 +257,16 @@
   ModelRunner->setFeature(FeatureIndex::CalleeConditionallyExecutedBlocks,
                           CalleeBefore.BlocksReachedFromConditionalInstruction);
   ModelRunner->setFeature(FeatureIndex::CalleeUsers, CalleeBefore.Uses);
+  ModelRunner->setFeature(FeatureIndex::CostEstimate, CostEstimate);
+
+  // Add the cost features
+  for (size_t I = 0;
+       I < static_cast<size_t>(InlineCostFeatureIndex::NumberOfFeatures); ++I) {
+    ModelRunner->setFeature(
+        inlineCostFeatureToMlFeature(static_cast<InlineCostFeatureIndex>(I)),
+        CostFeatures->at(I));
+  }
+
   return getAdviceFromModel(CB, ORE);
 }
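
Illustration of the targeted-invalidation idiom adopted above: start from PreservedAnalyses::all() and abandon only the analysis known to be stale, then let the manager invalidate accordingly. DominatorTreeAnalysis below stands in for any analysis ID; the function name is invented.

#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"

static void invalidateJustOne(llvm::FunctionAnalysisManager &FAM,
                              llvm::Function &F) {
  llvm::PreservedAnalyses PA = llvm::PreservedAnalyses::all();
  PA.abandon<llvm::DominatorTreeAnalysis>(); // everything else stays cached
  FAM.invalidate(F, PA);
}
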
 
diff --git a/src/llvm-project/llvm/lib/Analysis/MemDerefPrinter.cpp b/src/llvm-project/llvm/lib/Analysis/MemDerefPrinter.cpp
index 0078cea..1b16e1a 100644
--- a/src/llvm-project/llvm/lib/Analysis/MemDerefPrinter.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/MemDerefPrinter.cpp
@@ -70,12 +70,13 @@
 void MemDerefPrinter::print(raw_ostream &OS, const Module *M) const {
   OS << "The following are dereferenceable:\n";
   for (Value *V: Deref) {
+    OS << "  ";
     V->print(OS);
     if (DerefAndAligned.count(V))
       OS << "\t(aligned)";
     else
       OS << "\t(unaligned)";
-    OS << "\n\n";
+    OS << "\n";
   }
 }
 
@@ -101,12 +102,13 @@
 
   OS << "The following are dereferenceable:\n";
   for (Value *V : Deref) {
+    OS << "  ";
     V->print(OS);
     if (DerefAndAligned.count(V))
       OS << "\t(aligned)";
     else
       OS << "\t(unaligned)";
-    OS << "\n\n";
+    OS << "\n";
   }
   return PreservedAnalyses::all();
 }
diff --git a/src/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp b/src/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
index a42f54d..a2e1364 100644
--- a/src/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -103,6 +103,7 @@
   {LibFunc_msvc_new_array_longlong,         {OpNewLike,   1, 0,  -1}}, // new[](unsigned long long)
   {LibFunc_msvc_new_array_longlong_nothrow, {MallocLike,  2, 0,  -1}}, // new[](unsigned long long, nothrow)
   {LibFunc_aligned_alloc,       {AlignedAllocLike, 2, 1,  -1}},
+  {LibFunc_memalign,            {AlignedAllocLike, 2, 1,  -1}},
   {LibFunc_calloc,              {CallocLike,  2, 0,   1}},
   {LibFunc_vec_calloc,          {CallocLike,  2, 0,   1}},
   {LibFunc_realloc,             {ReallocLike, 2, 1,  -1}},
@@ -110,6 +111,7 @@
   {LibFunc_reallocf,            {ReallocLike, 2, 1,  -1}},
   {LibFunc_strdup,              {StrDupLike,  1, -1, -1}},
   {LibFunc_strndup,             {StrDupLike,  2, 1,  -1}},
+  {LibFunc___kmpc_alloc_shared, {MallocLike,  1, 0,  -1}},
 
   {LibFunc_rust_alloc,         {MallocLike,  2, 0,  -1}},
   {LibFunc_rust_realloc,       {ReallocLike,  4, 3,  -1}},
@@ -143,9 +145,8 @@
 getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                              const TargetLibraryInfo *TLI) {
   // Make sure that the function is available.
-  StringRef FnName = Callee->getName();
   LibFunc TLIFn;
-  if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
+  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
     return None;
 
   const auto *Iter = find_if(
@@ -458,7 +459,8 @@
            TLIFn == LibFunc_msvc_delete_array_ptr32_int ||      // delete[](void*, uint)
            TLIFn == LibFunc_msvc_delete_array_ptr64_longlong || // delete[](void*, ulonglong)
            TLIFn == LibFunc_msvc_delete_array_ptr32_nothrow || // delete[](void*, nothrow)
-           TLIFn == LibFunc_msvc_delete_array_ptr64_nothrow)   // delete[](void*, nothrow)
+           TLIFn == LibFunc_msvc_delete_array_ptr64_nothrow || // delete[](void*, nothrow)
+           TLIFn == LibFunc___kmpc_free_shared) // OpenMP Offloading RTL free
     ExpectedNumParams = 2;
   else if (TLIFn == LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t || // delete(void*, align_val_t, nothrow)
            TLIFn == LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t || // delete[](void*, align_val_t, nothrow)
@@ -493,9 +495,8 @@
   if (Callee == nullptr || IsNoBuiltinCall)
     return nullptr;
 
-  StringRef FnName = Callee->getName();
   LibFunc TLIFn;
-  if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
+  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
     return nullptr;
 
   return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr;
@@ -959,7 +960,14 @@
 
   // must be a VLA
   assert(I.isArrayAllocation());
-  Value *ArraySize = I.getArraySize();
+
+  // If needed, adjust the alloca's operand size to match the pointer size.
+  // Subsequent math operations expect the types to match.
+  Value *ArraySize = Builder.CreateZExtOrTrunc(
+      I.getArraySize(), DL.getIntPtrType(I.getContext()));
+  assert(ArraySize->getType() == Zero->getType() &&
+         "Expected zero constant to have pointer type");
+
   Value *Size = ConstantInt::get(ArraySize->getType(),
                                  DL.getTypeAllocSize(I.getAllocatedType()));
   Size = Builder.CreateMul(Size, ArraySize);
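
A simplified illustration of the width-matching fix above: the VLA element count is normalized to the pointer-width integer type before the multiply, so both CreateMul operands share a type. The helper name is invented for this sketch.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"

static llvm::Value *vlaSizeInBytes(llvm::AllocaInst &AI, llvm::IRBuilder<> &B,
                                   const llvm::DataLayout &DL) {
  llvm::Type *IntPtrTy = DL.getIntPtrType(AI.getContext());
  // e.g. an i8 count is zero-extended, an i128 count truncated.
  llvm::Value *Count = B.CreateZExtOrTrunc(AI.getArraySize(), IntPtrTy);
  llvm::Value *ElemSize = llvm::ConstantInt::get(
      IntPtrTy, DL.getTypeAllocSize(AI.getAllocatedType()));
  return B.CreateMul(ElemSize, Count);
}
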
diff --git a/src/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 886b5bf..b44d15e 100644
--- a/src/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -243,19 +243,10 @@
   return MemDepResult::getNonFuncLocal();
 }
 
-static bool isVolatile(Instruction *Inst) {
-  if (auto *LI = dyn_cast<LoadInst>(Inst))
-    return LI->isVolatile();
-  if (auto *SI = dyn_cast<StoreInst>(Inst))
-    return SI->isVolatile();
-  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
-    return AI->isVolatile();
-  return false;
-}
-
 MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
     const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
-    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
+    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
+    BatchAAResults &BatchAA) {
   MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
   if (QueryInst != nullptr) {
     if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
@@ -266,7 +257,7 @@
     }
   }
   MemDepResult SimpleDep = getSimplePointerDependencyFrom(
-      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit);
+      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit, BatchAA);
   if (SimpleDep.isDef())
     return SimpleDep;
   // Non-local invariant group dependency indicates there is non local Def
@@ -280,6 +271,14 @@
   return SimpleDep;
 }
 
+MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
+    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
+    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
+  BatchAAResults BatchAA(AA);
+  return getPointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst, Limit,
+                                  BatchAA);
+}
+
 MemDepResult
 MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                             BasicBlock *BB) {
@@ -369,9 +368,8 @@
 
 MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
     const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
-    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
-  // We can batch AA queries, because IR does not change during a MemDep query.
-  BatchAAResults BatchAA(AA);
+    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
+    BatchAAResults &BatchAA) {
   bool isInvariantLoad = false;
 
   unsigned DefaultLimit = getDefaultBlockScanLimit();
@@ -467,9 +465,9 @@
         MemoryLocation Loc;
         /*ModRefInfo MR =*/ GetLocation(II, Loc, TLI);
         AliasResult R = BatchAA.alias(Loc, MemLoc);
-        if (R == NoAlias)
+        if (R == AliasResult::NoAlias)
           continue;
-        if (R == MustAlias)
+        if (R == AliasResult::MustAlias)
           return MemDepResult::getDef(II);
         if (ID == Intrinsic::masked_load)
           continue;
@@ -491,7 +489,7 @@
         if (!QueryInst)
           // Original QueryInst *may* be volatile
           return MemDepResult::getClobber(LI);
-        if (isVolatile(QueryInst))
+        if (QueryInst->isVolatile())
           // Ordering required if QueryInst is itself volatile
           return MemDepResult::getClobber(LI);
         // Otherwise, volatile doesn't imply any special ordering
@@ -515,23 +513,19 @@
       AliasResult R = BatchAA.alias(LoadLoc, MemLoc);
 
       if (isLoad) {
-        if (R == NoAlias)
+        if (R == AliasResult::NoAlias)
           continue;
 
         // Must aliased loads are defs of each other.
-        if (R == MustAlias)
+        if (R == AliasResult::MustAlias)
           return MemDepResult::getDef(Inst);
 
-#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
-      // in terms of clobbering loads, but since it does this by looking
-      // at the clobbering load directly, it doesn't know about any
-      // phi translation that may have happened along the way.
-
         // If we have a partial alias, then return this as a clobber for the
         // client to handle.
-        if (R == PartialAlias)
+        if (R == AliasResult::PartialAlias && R.hasOffset()) {
+          ClobberOffsets[LI] = R.getOffset();
           return MemDepResult::getClobber(Inst);
-#endif
+        }
 
         // Random may-alias loads don't depend on each other without a
         // dependence.
@@ -539,7 +533,7 @@
       }
 
       // Stores don't depend on other no-aliased accesses.
-      if (R == NoAlias)
+      if (R == AliasResult::NoAlias)
         continue;
 
       // Stores don't alias loads from read-only memory.
@@ -585,9 +579,9 @@
       // If we found a pointer, check if it could be the same as our pointer.
       AliasResult R = BatchAA.alias(StoreLoc, MemLoc);
 
-      if (R == NoAlias)
+      if (R == AliasResult::NoAlias)
         continue;
-      if (R == MustAlias)
+      if (R == AliasResult::MustAlias)
         return MemDepResult::getDef(Inst);
       if (isInvariantLoad)
         continue;
@@ -622,8 +616,7 @@
     ModRefInfo MR = BatchAA.getModRefInfo(Inst, MemLoc);
     // If necessary, perform additional analysis.
     if (isModAndRefSet(MR))
-      // TODO: Support callCapturesBefore() on BatchAAResults.
-      MR = AA.callCapturesBefore(Inst, MemLoc, &DT);
+      MR = BatchAA.callCapturesBefore(Inst, MemLoc, &DT);
     switch (clearMust(MR)) {
     case ModRefInfo::NoModRef:
       // If the call has no effect on the queried pointer, just ignore it.
@@ -650,6 +643,7 @@
 }
 
 MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
+  ClobberOffsets.clear();
   Instruction *ScanPos = QueryInst;
 
   // Check for a cached result
@@ -723,7 +717,7 @@
   assert(getDependency(QueryCall).isNonLocal() &&
          "getNonLocalCallDependency should only be used on calls with "
          "non-local deps!");
-  PerInstNLInfo &CacheP = NonLocalDeps[QueryCall];
+  PerInstNLInfo &CacheP = NonLocalDepsMap[QueryCall];
   NonLocalDepInfo &Cache = CacheP.first;
 
   // This is the set of blocks that need to be recomputed.  In the cached case,
@@ -882,7 +876,7 @@
     }
     return false;
   };
-  if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
+  if (QueryInst->isVolatile() || isOrdered(QueryInst)) {
     Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                        const_cast<Value *>(Loc.Ptr)));
     return;
@@ -908,9 +902,10 @@
 /// info if available).
 ///
 /// If we do a lookup, add the result to the cache.
-MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
+MemDepResult MemoryDependenceResults::getNonLocalInfoForBlock(
     Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
-    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
+    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries,
+    BatchAAResults &BatchAA) {
 
   bool isInvariantLoad = false;
 
@@ -960,8 +955,8 @@
   }
 
   // Scan the block for the dependency.
-  MemDepResult Dep =
-      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);
+  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB,
+                                              QueryInst, nullptr, BatchAA);
 
   // Don't cache results for invariant load.
   if (isInvariantLoad)
@@ -1135,9 +1130,6 @@
   // investigating, just return it with no recomputation.
   // Don't use cached information for invariant loads since it is valid for
   // non-invariant loads only.
-  //
-  // Don't use cached information for invariant loads since it is valid for
-  // non-invariant loads only.
   if (!IsIncomplete && !isInvariantLoad &&
       CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
     // We have a fully cached result for this query then we can just return the
@@ -1205,6 +1197,7 @@
   bool GotWorklistLimit = false;
   LLVM_DEBUG(AssertSorted(*Cache));
 
+  BatchAAResults BatchAA(AA);
   while (!Worklist.empty()) {
     BasicBlock *BB = Worklist.pop_back_val();
 
@@ -1235,8 +1228,8 @@
       // Get the dependency info for Pointer in BB.  If we have cached
       // information, we will use it, otherwise we compute it.
       LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
-      MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
-                                                 Cache, NumSortedEntries);
+      MemDepResult Dep = getNonLocalInfoForBlock(
+          QueryInst, Loc, isLoad, BB, Cache, NumSortedEntries, BatchAA);
 
       // If we got a Def or Clobber, add this to the list of results.
       if (!Dep.isNonLocal()) {
@@ -1458,7 +1451,7 @@
 }
 
 /// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
-void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
+void MemoryDependenceResults::removeCachedNonLocalPointerDependencies(
     ValueIsLoadPair P) {
 
   // Most of the time this cache is empty.
@@ -1507,9 +1500,9 @@
   if (!Ptr->getType()->isPointerTy())
     return;
   // Flush store info for the pointer.
-  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
+  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
   // Flush load info for the pointer.
-  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
+  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
   // Invalidate phis that use the pointer.
   PV.invalidateValue(Ptr);
 }
@@ -1521,13 +1514,13 @@
 void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
   // Walk through the Non-local dependencies, removing this one as the value
   // for any cached queries.
-  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
-  if (NLDI != NonLocalDeps.end()) {
+  NonLocalDepMapType::iterator NLDI = NonLocalDepsMap.find(RemInst);
+  if (NLDI != NonLocalDepsMap.end()) {
     NonLocalDepInfo &BlockMap = NLDI->second.first;
     for (auto &Entry : BlockMap)
       if (Instruction *Inst = Entry.getResult().getInst())
         RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
-    NonLocalDeps.erase(NLDI);
+    NonLocalDepsMap.erase(NLDI);
   }
 
   // If we have a cached local dependence query for this instruction, remove it.
@@ -1547,8 +1540,8 @@
   // If the instruction is a pointer, remove it from both the load info and the
   // store info.
   if (RemInst->getType()->isPointerTy()) {
-    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
-    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
+    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
+    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
   } else {
     // Otherwise, if the instructions is in the map directly, it must be a load.
     // Remove it.
@@ -1611,7 +1604,7 @@
     for (Instruction *I : ReverseDepIt->second) {
       assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
 
-      PerInstNLInfo &INLD = NonLocalDeps[I];
+      PerInstNLInfo &INLD = NonLocalDepsMap[I];
       // The information is now dirty!
       INLD.second = true;
 
@@ -1683,7 +1676,7 @@
   // Invalidate phis that use the removed instruction.
   PV.invalidateValue(RemInst);
 
-  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
+  assert(!NonLocalDepsMap.count(RemInst) && "RemInst got reinserted?");
   LLVM_DEBUG(verifyRemoved(RemInst));
 }
 
@@ -1704,7 +1697,7 @@
       assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
   }
 
-  for (const auto &DepKV : NonLocalDeps) {
+  for (const auto &DepKV : NonLocalDepsMap) {
     assert(DepKV.first != D && "Inst occurs in data structures");
     const PerInstNLInfo &INLD = DepKV.second;
     for (const auto &Entry : INLD.first)
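
Illustration of the batching idiom this file now threads through its walks: wrap the AAResults in a BatchAAResults for a run of queries over unchanged IR, so alias results are cached across the whole scan. The counting helper below is hypothetical.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"

static unsigned countNoAliasPairs(llvm::AAResults &AA,
                                  llvm::ArrayRef<llvm::MemoryLocation> Locs) {
  llvm::BatchAAResults BatchAA(AA); // valid only while the IR is stable
  unsigned N = 0;
  for (size_t I = 0; I < Locs.size(); ++I)
    for (size_t J = I + 1; J < Locs.size(); ++J)
      if (BatchAA.alias(Locs[I], Locs[J]) == llvm::AliasResult::NoAlias)
        ++N;
  return N;
}
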
diff --git a/src/llvm-project/llvm/lib/Analysis/MemorySSA.cpp b/src/llvm-project/llvm/lib/Analysis/MemorySSA.cpp
index 4722b68..b402b04 100644
--- a/src/llvm-project/llvm/lib/Analysis/MemorySSA.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/MemorySSA.cpp
@@ -21,6 +21,7 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/ADT/iterator.h"
 #include "llvm/ADT/iterator_range.h"
 #include "llvm/Analysis/AliasAnalysis.h"
@@ -285,7 +286,7 @@
     case Intrinsic::invariant_end:
     case Intrinsic::assume:
     case Intrinsic::experimental_noalias_scope_decl:
-      return {false, NoAlias};
+      return {false, AliasResult(AliasResult::NoAlias)};
     case Intrinsic::dbg_addr:
     case Intrinsic::dbg_declare:
     case Intrinsic::dbg_label:
@@ -298,16 +299,17 @@
 
   if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
     ModRefInfo I = AA.getModRefInfo(DefInst, CB);
-    AR = isMustSet(I) ? MustAlias : MayAlias;
+    AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
     return {isModOrRefSet(I), AR};
   }
 
   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
     if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
-      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
+      return {!areLoadsReorderable(UseLoad, DefLoad),
+              AliasResult(AliasResult::MayAlias)};
 
   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
-  AR = isMustSet(I) ? MustAlias : MayAlias;
+  AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
   return {isModSet(I), AR};
 }
 
@@ -343,7 +345,7 @@
   const Instruction *Inst = nullptr;
   // The MemoryAccess we actually got called with, used to test local domination
   const MemoryAccess *OriginalAccess = nullptr;
-  Optional<AliasResult> AR = MayAlias;
+  Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
   bool SkipSelfAccess = false;
 
   UpwardsMemoryQuery() = default;
@@ -569,14 +571,14 @@
     for (MemoryAccess *Current : def_chain(Desc.Last)) {
       Desc.Last = Current;
       if (Current == StopAt || Current == SkipStopAt)
-        return {Current, false, MayAlias};
+        return {Current, false, AliasResult(AliasResult::MayAlias)};
 
       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
         if (MSSA.isLiveOnEntryDef(MD))
-          return {MD, true, MustAlias};
+          return {MD, true, AliasResult(AliasResult::MustAlias)};
 
         if (!--*UpwardWalkLimit)
-          return {Current, true, MayAlias};
+          return {Current, true, AliasResult(AliasResult::MayAlias)};
 
         ClobberAlias CA =
             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
@@ -590,7 +592,7 @@
 
     assert(isa<MemoryPhi>(Desc.Last) &&
            "Ended at a non-clobber that's not a phi?");
-    return {Desc.Last, false, MayAlias};
+    return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
   }
 
   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
@@ -1408,7 +1410,7 @@
     if (!LocInfo.LastKillValid) {
       LocInfo.LastKill = VersionStack.size() - 1;
       LocInfo.LastKillValid = true;
-      LocInfo.AR = MayAlias;
+      LocInfo.AR = AliasResult::MayAlias;
     }
 
     // At this point, we should have corrected last kill and LowerBound to be
@@ -2171,17 +2173,13 @@
 }
 
 void MemoryPhi::print(raw_ostream &OS) const {
-  bool First = true;
+  ListSeparator LS(",");
   OS << getID() << " = MemoryPhi(";
   for (const auto &Op : operands()) {
     BasicBlock *BB = getIncomingBlock(Op);
     MemoryAccess *MA = cast<MemoryAccess>(Op);
-    if (!First)
-      OS << ',';
-    else
-      First = false;
 
-    OS << '{';
+    OS << LS << '{';
     if (BB->hasName())
       OS << BB->getName();
     else
@@ -2405,22 +2403,23 @@
 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
     unsigned &UpwardWalkLimit) {
-  if (isa<MemoryPhi>(StartingAccess))
-    return StartingAccess;
+  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
 
-  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
-  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
-    return StartingUseOrDef;
+  Instruction *I = nullptr;
+  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
+    if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
+      return StartingUseOrDef;
 
-  Instruction *I = StartingUseOrDef->getMemoryInst();
+    I = StartingUseOrDef->getMemoryInst();
 
-  // Conservatively, fences are always clobbers, so don't perform the walk if we
-  // hit a fence.
-  if (!isa<CallBase>(I) && I->isFenceLike())
-    return StartingUseOrDef;
+    // Conservatively, fences are always clobbers, so don't perform the walk if
+    // we hit a fence.
+    if (!isa<CallBase>(I) && I->isFenceLike())
+      return StartingUseOrDef;
+  }
 
   UpwardsMemoryQuery Q;
-  Q.OriginalAccess = StartingUseOrDef;
+  Q.OriginalAccess = StartingAccess;
   Q.StartingLoc = Loc;
   Q.Inst = nullptr;
   Q.IsCall = false;
@@ -2428,16 +2427,14 @@
   // Unlike the other function, do not walk to the def of a def, because we are
   // handed something we already believe is the clobbering access.
   // We never set SkipSelf to true in Q in this method.
-  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
-                                     ? StartingUseOrDef->getDefiningAccess()
-                                     : StartingUseOrDef;
-
   MemoryAccess *Clobber =
-      Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
-  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
-  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
-  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
-  LLVM_DEBUG(dbgs() << *Clobber << "\n");
+      Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
+  LLVM_DEBUG({
+    dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
+    if (I)
+      dbgs() << "  for instruction " << *I << "\n";
+    dbgs() << "  is " << *Clobber << "\n";
+  });
   return Clobber;
 }
 
@@ -2494,8 +2491,9 @@
     StartingAccess->setOptimized(OptimizedAccess);
     if (MSSA->isLiveOnEntryDef(OptimizedAccess))
       StartingAccess->setOptimizedAccessType(None);
-    else if (Q.AR == MustAlias)
-      StartingAccess->setOptimizedAccessType(MustAlias);
+    else if (Q.AR && *Q.AR == AliasResult::MustAlias)
+      StartingAccess->setOptimizedAccessType(
+          AliasResult(AliasResult::MustAlias));
   } else
     OptimizedAccess = StartingAccess->getOptimized();
 
@@ -2554,6 +2552,10 @@
   };
 
   Ptr = Ptr->stripPointerCasts();
+  if (auto *I = dyn_cast<Instruction>(Ptr)) {
+    if (I->getParent()->isEntryBlock())
+      return true;
+  }
   if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
     return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
            GEP->hasAllConstantIndices();
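
Illustration of the ListSeparator helper adopted in MemoryPhi::print above: it prints nothing on first use and the separator on every later use, replacing the manual "bool First" bookkeeping. The printing function is an invented example.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"

static void printNames(llvm::raw_ostream &OS,
                       llvm::ArrayRef<llvm::StringRef> Names) {
  llvm::ListSeparator LS(", ");
  for (llvm::StringRef N : Names)
    OS << LS << N; // prints "a, b, c" with no leading separator
}
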
diff --git a/src/llvm-project/llvm/lib/Analysis/MemorySSAUpdater.cpp b/src/llvm-project/llvm/lib/Analysis/MemorySSAUpdater.cpp
index 99fa58b..616864f 100644
--- a/src/llvm-project/llvm/lib/Analysis/MemorySSAUpdater.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/MemorySSAUpdater.cpp
@@ -363,14 +363,11 @@
     // place, compute IDF and place phis.
     SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
 
-    // If this is the last Def in the block, also compute IDF based on MD, since
-    // this may a new Def added, and we may need additional Phis.
-    auto Iter = MD->getDefsIterator();
-    ++Iter;
-    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
-    if (Iter == IterEnd)
-      DefiningBlocks.insert(MD->getBlock());
-
+    // If this is the last Def in the block, we may need additional Phis.
+    // Compute IDF in all cases, as renaming needs to be done even when MD is
+    // not the last access, because it can introduce a new access past which a
+    // previous access was optimized; that access needs to be reoptimized.
+    DefiningBlocks.insert(MD->getBlock());
     for (const auto &VH : InsertedPHIs)
       if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
         DefiningBlocks.insert(RealPHI->getBlock());
@@ -1395,11 +1392,9 @@
     MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
     if (!Acc)
       continue;
-    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
-      MemoryAccess *MA = &*AB;
-      ++AB;
-      MSSA->removeFromLookups(MA);
-      MSSA->removeFromLists(MA);
+    for (MemoryAccess &MA : llvm::make_early_inc_range(*Acc)) {
+      MSSA->removeFromLookups(&MA);
+      MSSA->removeFromLists(&MA);
     }
   }
 }
@@ -1431,22 +1426,6 @@
   tryRemoveTrivialPhis(UpdatedPHIs);
 }
 
-void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
-                                                         const BasicBlock *To) {
-  const BasicBlock *BB = BI->getParent();
-  SmallVector<WeakVH, 16> UpdatedPHIs;
-  for (const BasicBlock *Succ : successors(BB)) {
-    removeDuplicatePhiEdgesBetween(BB, Succ);
-    if (Succ != To)
-      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
-        MPhi->unorderedDeleteIncomingBlock(BB);
-        UpdatedPHIs.push_back(MPhi);
-      }
-  }
-  // Optimize trivial phis.
-  tryRemoveTrivialPhis(UpdatedPHIs);
-}
-
 MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
     Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
     MemorySSA::InsertionPlace Point) {
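
Illustration of llvm::make_early_inc_range as used in the removal loop above: the underlying iterator is advanced before each element is handed to the body, so the current element can be erased safely. The example function is invented.

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"

static void dropAllStores(llvm::BasicBlock &BB) {
  for (llvm::Instruction &I : llvm::make_early_inc_range(BB))
    if (llvm::isa<llvm::StoreInst>(I))
      I.eraseFromParent(); // safe: the range already stepped past &I
}
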
diff --git a/src/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
index 5f7746e..e435532 100644
--- a/src/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
@@ -50,6 +50,7 @@
 #include "llvm/Pass.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileSystem.h"
 #include <algorithm>
 #include <cassert>
 #include <cstdint>
@@ -91,14 +92,11 @@
                          SmallPtrSet<const User *, 8> &Visited) {
   bool HasBlockAddress = false;
   SmallVector<const User *, 32> Worklist;
-  Worklist.push_back(CurUser);
+  if (Visited.insert(CurUser).second)
+    Worklist.push_back(CurUser);
 
   while (!Worklist.empty()) {
     const User *U = Worklist.pop_back_val();
-
-    if (!Visited.insert(U).second)
-      continue;
-
     const auto *CB = dyn_cast<CallBase>(U);
 
     for (const auto &OI : U->operands()) {
@@ -117,7 +115,8 @@
           RefEdges.insert(Index.getOrInsertValueInfo(GV));
         continue;
       }
-      Worklist.push_back(Operand);
+      if (Visited.insert(Operand).second)
+        Worklist.push_back(Operand);
     }
   }
   return HasBlockAddress;
@@ -178,11 +177,7 @@
     // Intrinsics that are assumed are relevant only to the devirtualization
     // pass, not the type test lowering pass.
     bool HasNonAssumeUses = llvm::any_of(CI->uses(), [](const Use &CIU) {
-      auto *AssumeCI = dyn_cast<CallInst>(CIU.getUser());
-      if (!AssumeCI)
-        return true;
-      Function *F = AssumeCI->getCalledFunction();
-      return !F || F->getIntrinsicID() != Intrinsic::assume;
+      return !isa<AssumeInst>(CIU.getUser());
     });
     if (HasNonAssumeUses)
       TypeTests.insert(Guid);
@@ -459,9 +454,10 @@
   bool NonRenamableLocal = isNonRenamableLocal(F);
   bool NotEligibleForImport =
       NonRenamableLocal || HasInlineAsmMaybeReferencingInternal;
-  GlobalValueSummary::GVFlags Flags(F.getLinkage(), NotEligibleForImport,
-                                    /* Live = */ false, F.isDSOLocal(),
-                                    F.hasLinkOnceODRLinkage() && F.hasGlobalUnnamedAddr());
+  GlobalValueSummary::GVFlags Flags(
+      F.getLinkage(), F.getVisibility(), NotEligibleForImport,
+      /* Live = */ false, F.isDSOLocal(),
+      F.hasLinkOnceODRLinkage() && F.hasGlobalUnnamedAddr());
   FunctionSummary::FFlags FunFlags{
       F.hasFnAttribute(Attribute::ReadNone),
       F.hasFnAttribute(Attribute::ReadOnly),
@@ -510,10 +506,8 @@
     assert(STy);
     const StructLayout *SL = DL.getStructLayout(C->getType());
 
-    for (StructType::element_iterator EB = STy->element_begin(), EI = EB,
-                                      EE = STy->element_end();
-         EI != EE; ++EI) {
-      auto Offset = SL->getElementOffset(EI - EB);
+    for (auto EI : llvm::enumerate(STy->elements())) {
+      auto Offset = SL->getElementOffset(EI.index());
       unsigned Op = SL->getElementContainingOffset(Offset);
       findFuncPointers(cast<Constant>(I->getOperand(Op)),
                        StartingOffset + Offset, M, Index, VTableFuncs);
@@ -580,9 +574,10 @@
   SmallPtrSet<const User *, 8> Visited;
   bool HasBlockAddress = findRefEdges(Index, &V, RefEdges, Visited);
   bool NonRenamableLocal = isNonRenamableLocal(V);
-  GlobalValueSummary::GVFlags Flags(V.getLinkage(), NonRenamableLocal,
-                                    /* Live = */ false, V.isDSOLocal(),
-                                    V.hasLinkOnceODRLinkage() && V.hasGlobalUnnamedAddr());
+  GlobalValueSummary::GVFlags Flags(
+      V.getLinkage(), V.getVisibility(), NonRenamableLocal,
+      /* Live = */ false, V.isDSOLocal(),
+      V.hasLinkOnceODRLinkage() && V.hasGlobalUnnamedAddr());
 
   VTableFuncList VTableFuncs;
   // If splitting is not enabled, then we compute the summary information
@@ -622,9 +617,10 @@
 computeAliasSummary(ModuleSummaryIndex &Index, const GlobalAlias &A,
                     DenseSet<GlobalValue::GUID> &CantBePromoted) {
   bool NonRenamableLocal = isNonRenamableLocal(A);
-  GlobalValueSummary::GVFlags Flags(A.getLinkage(), NonRenamableLocal,
-                                    /* Live = */ false, A.isDSOLocal(),
-                                    A.hasLinkOnceODRLinkage() && A.hasGlobalUnnamedAddr());
+  GlobalValueSummary::GVFlags Flags(
+      A.getLinkage(), A.getVisibility(), NonRenamableLocal,
+      /* Live = */ false, A.isDSOLocal(),
+      A.hasLinkOnceODRLinkage() && A.hasGlobalUnnamedAddr());
   auto AS = std::make_unique<AliasSummary>(Flags);
   auto *Aliasee = A.getBaseObject();
   auto AliaseeVI = Index.getValueInfo(Aliasee->getGUID());
@@ -661,12 +657,12 @@
   // promotion, but we may have opaque uses e.g. in inline asm. We collect them
   // here because we use this information to mark functions containing inline
   // assembly calls as not importable.
-  SmallPtrSet<GlobalValue *, 8> LocalsUsed;
-  SmallPtrSet<GlobalValue *, 8> Used;
+  SmallPtrSet<GlobalValue *, 4> LocalsUsed;
+  SmallVector<GlobalValue *, 4> Used;
   // First collect those in the llvm.used set.
-  collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);
+  collectUsedGlobalVariables(M, Used, /*CompilerUsed=*/false);
   // Next collect those in the llvm.compiler.used set.
-  collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ true);
+  collectUsedGlobalVariables(M, Used, /*CompilerUsed=*/true);
   DenseSet<GlobalValue::GUID> CantBePromoted;
   for (auto *V : Used) {
     if (V->hasLocalLinkage()) {
@@ -697,11 +693,12 @@
           if (!GV)
             return;
           assert(GV->isDeclaration() && "Def in module asm already has definition");
-          GlobalValueSummary::GVFlags GVFlags(GlobalValue::InternalLinkage,
-                                              /* NotEligibleToImport = */ true,
-                                              /* Live = */ true,
-                                              /* Local */ GV->isDSOLocal(),
-                                              GV->hasLinkOnceODRLinkage() && GV->hasGlobalUnnamedAddr());
+          GlobalValueSummary::GVFlags GVFlags(
+              GlobalValue::InternalLinkage, GlobalValue::DefaultVisibility,
+              /* NotEligibleToImport = */ true,
+              /* Live = */ true,
+              /* Local */ GV->isDSOLocal(),
+              GV->hasLinkOnceODRLinkage() && GV->hasGlobalUnnamedAddr());
           CantBePromoted.insert(GV->getGUID());
           // Create the appropriate summary type.
           if (Function *F = dyn_cast<Function>(GV)) {
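
Illustration of the llvm::enumerate rewrite above: iterate elements together with their index instead of carrying parallel iterators. The summing helper is a made-up example.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

static int weightedSum(const llvm::SmallVectorImpl<int> &Vals) {
  int Sum = 0;
  for (auto EV : llvm::enumerate(Vals))
    Sum += static_cast<int>(EV.index()) * EV.value();
  return Sum;
}
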
diff --git a/src/llvm-project/llvm/lib/Analysis/MustExecute.cpp b/src/llvm-project/llvm/lib/Analysis/MustExecute.cpp
index 1e76260..5ca72f5f 100644
--- a/src/llvm-project/llvm/lib/Analysis/MustExecute.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/MustExecute.cpp
@@ -8,6 +8,7 @@
 
 #include "llvm/Analysis/MustExecute.h"
 #include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/CFG.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/LoopInfo.h"
@@ -451,13 +452,9 @@
     else
       OS << " ; (mustexec in: ";
 
-    bool first = true;
-    for (const Loop *L : Loops) {
-      if (!first)
-        OS << ", ";
-      first = false;
-      OS << L->getHeader()->getName();
-    }
+    ListSeparator LS;
+    for (const Loop *L : Loops)
+      OS << LS << L->getHeader()->getName();
     OS << ")";
   }
 };
diff --git a/src/llvm-project/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
index 786d03f..0826b30 100644
--- a/src/llvm-project/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ObjCARCAliasAnalysis.cpp
@@ -49,7 +49,7 @@
   AliasResult Result =
       AAResultBase::alias(MemoryLocation(SA, LocA.Size, LocA.AATags),
                           MemoryLocation(SB, LocB.Size, LocB.AATags), AAQI);
-  if (Result != MayAlias)
+  if (Result != AliasResult::MayAlias)
     return Result;
 
   // If that failed, climb to the underlying object, including climbing through
@@ -61,13 +61,13 @@
                                  MemoryLocation::getBeforeOrAfter(UB), AAQI);
     // We can't use MustAlias or PartialAlias results here because
     // GetUnderlyingObjCPtr may return an offsetted pointer value.
-    if (Result == NoAlias)
-      return NoAlias;
+    if (Result == AliasResult::NoAlias)
+      return AliasResult::NoAlias;
   }
 
   // If that failed, fail. We don't need to chain here, since that's covered
   // by the earlier precise query.
-  return MayAlias;
+  return AliasResult::MayAlias;
 }
 
 bool ObjCARCAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
diff --git a/src/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp b/src/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp
index 95190780..704d15f 100644
--- a/src/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ObjCARCInstKind.cpp
@@ -140,6 +140,7 @@
     return ARCInstKind::User;
   case Intrinsic::objc_sync_exit:
     return ARCInstKind::User;
+  case Intrinsic::objc_clang_arc_noop_use:
   case Intrinsic::objc_arc_annotation_topdown_bbstart:
   case Intrinsic::objc_arc_annotation_topdown_bbend:
   case Intrinsic::objc_arc_annotation_bottomup_bbstart:
diff --git a/src/llvm-project/llvm/lib/Analysis/OverflowInstAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/OverflowInstAnalysis.cpp
new file mode 100644
index 0000000..9f17d5b
--- /dev/null
+++ b/src/llvm-project/llvm/lib/Analysis/OverflowInstAnalysis.cpp
@@ -0,0 +1,72 @@
+//==-- OverflowInstAnalysis.cpp - Utils to fold overflow insts ----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file holds routines to help analyse overflow instructions
+// and fold them into constants or other overflow instructions
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/OverflowInstAnalysis.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PatternMatch.h"
+
+using namespace llvm;
+using namespace llvm::PatternMatch;
+
+bool llvm::isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd,
+                                            Use *&Y) {
+  ICmpInst::Predicate Pred;
+  Value *X, *NotOp1;
+  int XIdx;
+  IntrinsicInst *II;
+
+  if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())))
+    return false;
+
+  ///   %Agg = call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
+  ///   %V = extractvalue { i4, i1 } %Agg, 1
+  auto matchMulOverflowCheck = [X, &II, &XIdx](Value *V) {
+    auto *Extract = dyn_cast<ExtractValueInst>(V);
+    // We should only be extracting the overflow bit.
+    if (!Extract || !Extract->getIndices().equals(1))
+      return false;
+
+    II = dyn_cast<IntrinsicInst>(Extract->getAggregateOperand());
+    if (!II ||
+        !match(II, m_CombineOr(m_Intrinsic<Intrinsic::umul_with_overflow>(),
+                               m_Intrinsic<Intrinsic::smul_with_overflow>())))
+      return false;
+
+    if (II->getArgOperand(0) == X)
+      XIdx = 0;
+    else if (II->getArgOperand(1) == X)
+      XIdx = 1;
+    else
+      return false;
+    return true;
+  };
+
+  bool Matched =
+      (IsAnd && Pred == ICmpInst::Predicate::ICMP_NE &&
+       matchMulOverflowCheck(Op1)) ||
+      (!IsAnd && Pred == ICmpInst::Predicate::ICMP_EQ &&
+       match(Op1, m_Not(m_Value(NotOp1))) && matchMulOverflowCheck(NotOp1));
+
+  if (!Matched)
+    return false;
+
+  Y = &II->getArgOperandUse(!XIdx);
+  return true;
+}
+
+bool llvm::isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1,
+                                            bool IsAnd) {
+  Use *Y;
+  return isCheckForZeroAndMulWithOverflow(Op0, Op1, IsAnd, Y);
+}
\ No newline at end of file
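
Illustration (a hypothetical caller, not from the patch): a simplifier asks whether `Cmp && Ovf` encodes "X != 0, and [us]mul.with.overflow(X, ...) overflowed", and receives back the use of the other multiplier for further rewriting. The function name guardsMulOverflow is invented.

#include "llvm/Analysis/OverflowInstAnalysis.h"
#include "llvm/IR/Value.h"

static bool guardsMulOverflow(llvm::Value *Cmp, llvm::Value *Ovf) {
  llvm::Use *YUse = nullptr;
  if (!llvm::isCheckForZeroAndMulWithOverflow(Cmp, Ovf, /*IsAnd=*/true, YUse))
    return false;
  // YUse now points at the multiplier operand that is not X.
  return YUse != nullptr;
}
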
diff --git a/src/llvm-project/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/src/llvm-project/llvm/lib/Analysis/ProfileSummaryInfo.cpp
index c9671d4..6dda0bf 100644
--- a/src/llvm-project/llvm/lib/Analysis/ProfileSummaryInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ProfileSummaryInfo.cpp
@@ -23,49 +23,13 @@
 #include "llvm/Support/CommandLine.h"
 using namespace llvm;
 
-// The following two parameters determine the threshold for a count to be
-// considered hot/cold. These two parameters are percentile values (multiplied
-// by 10000). If the counts are sorted in descending order, the minimum count to
-// reach ProfileSummaryCutoffHot gives the threshold to determine a hot count.
-// Similarly, the minimum count to reach ProfileSummaryCutoffCold gives the
-// threshold for determining cold count (everything <= this threshold is
-// considered cold).
-
-static cl::opt<int> ProfileSummaryCutoffHot(
-    "profile-summary-cutoff-hot", cl::Hidden, cl::init(990000), cl::ZeroOrMore,
-    cl::desc("A count is hot if it exceeds the minimum count to"
-             " reach this percentile of total counts."));
-
-static cl::opt<int> ProfileSummaryCutoffCold(
-    "profile-summary-cutoff-cold", cl::Hidden, cl::init(999999), cl::ZeroOrMore,
-    cl::desc("A count is cold if it is below the minimum count"
-             " to reach this percentile of total counts."));
-
-static cl::opt<unsigned> ProfileSummaryHugeWorkingSetSizeThreshold(
-    "profile-summary-huge-working-set-size-threshold", cl::Hidden,
-    cl::init(15000), cl::ZeroOrMore,
-    cl::desc("The code working set size is considered huge if the number of"
-             " blocks required to reach the -profile-summary-cutoff-hot"
-             " percentile exceeds this count."));
-
-static cl::opt<unsigned> ProfileSummaryLargeWorkingSetSizeThreshold(
-    "profile-summary-large-working-set-size-threshold", cl::Hidden,
-    cl::init(12500), cl::ZeroOrMore,
-    cl::desc("The code working set size is considered large if the number of"
-             " blocks required to reach the -profile-summary-cutoff-hot"
-             " percentile exceeds this count."));
-
-// The next two options override the counts derived from summary computation and
-// are useful for debugging purposes.
-static cl::opt<int> ProfileSummaryHotCount(
-    "profile-summary-hot-count", cl::ReallyHidden, cl::ZeroOrMore,
-    cl::desc("A fixed hot count that overrides the count derived from"
-             " profile-summary-cutoff-hot"));
-
-static cl::opt<int> ProfileSummaryColdCount(
-    "profile-summary-cold-count", cl::ReallyHidden, cl::ZeroOrMore,
-    cl::desc("A fixed cold count that overrides the count derived from"
-             " profile-summary-cutoff-cold"));
+// Knobs for profile-summary-based thresholds.
+extern cl::opt<int> ProfileSummaryCutoffHot;
+extern cl::opt<int> ProfileSummaryCutoffCold;
+extern cl::opt<unsigned> ProfileSummaryHugeWorkingSetSizeThreshold;
+extern cl::opt<unsigned> ProfileSummaryLargeWorkingSetSizeThreshold;
+extern cl::opt<int> ProfileSummaryHotCount;
+extern cl::opt<int> ProfileSummaryColdCount;
 
 static cl::opt<bool> PartialProfile(
     "partial-profile", cl::Hidden, cl::init(false),
@@ -95,13 +59,13 @@
   if (hasProfileSummary())
     return;
   // First try to get context sensitive ProfileSummary.
-  auto *SummaryMD = M.getProfileSummary(/* IsCS */ true);
+  auto *SummaryMD = M->getProfileSummary(/* IsCS */ true);
   if (SummaryMD)
     Summary.reset(ProfileSummary::getFromMD(SummaryMD));
 
   if (!hasProfileSummary()) {
     // This will actually return PSK_Instr or PSK_Sample summary.
-    SummaryMD = M.getProfileSummary(/* IsCS */ false);
+    SummaryMD = M->getProfileSummary(/* IsCS */ false);
     if (SummaryMD)
       Summary.reset(ProfileSummary::getFromMD(SummaryMD));
   }
@@ -274,14 +238,10 @@
   auto &DetailedSummary = Summary->getDetailedSummary();
   auto &HotEntry = ProfileSummaryBuilder::getEntryForPercentile(
       DetailedSummary, ProfileSummaryCutoffHot);
-  HotCountThreshold = HotEntry.MinCount;
-  if (ProfileSummaryHotCount.getNumOccurrences() > 0)
-    HotCountThreshold = ProfileSummaryHotCount;
-  auto &ColdEntry = ProfileSummaryBuilder::getEntryForPercentile(
-      DetailedSummary, ProfileSummaryCutoffCold);
-  ColdCountThreshold = ColdEntry.MinCount;
-  if (ProfileSummaryColdCount.getNumOccurrences() > 0)
-    ColdCountThreshold = ProfileSummaryColdCount;
+  HotCountThreshold =
+      ProfileSummaryBuilder::getHotCountThreshold(DetailedSummary);
+  ColdCountThreshold =
+      ProfileSummaryBuilder::getColdCountThreshold(DetailedSummary);
   assert(ColdCountThreshold <= HotCountThreshold &&
          "Cold count threshold cannot exceed hot count threshold!");
   if (!hasPartialSampleProfile() || !ScalePartialSampleProfileWorkingSetSize) {
diff --git a/src/llvm-project/llvm/lib/Analysis/ReleaseModeModelRunner.cpp b/src/llvm-project/llvm/lib/Analysis/ReleaseModeModelRunner.cpp
index 0b038b3..d2bf953 100644
--- a/src/llvm-project/llvm/lib/Analysis/ReleaseModeModelRunner.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ReleaseModeModelRunner.cpp
@@ -52,7 +52,7 @@
       CompiledModel(std::make_unique<llvm::InlinerSizeModel>()) {
   assert(CompiledModel && "The CompiledModel should be valid");
 
-  FeatureIndices.reserve(NumberOfFeatures);
+  FeatureIndices.resize(NumberOfFeatures);
 
   for (size_t I = 0; I < NumberOfFeatures; ++I) {
     const int Index =
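The reserve-to-resize change is a correctness fix rather than a cleanup: reserve only grows capacity and leaves the container empty, so the indexed writes into FeatureIndices that follow would otherwise touch elements that do not exist. A standalone illustration, independent of this patch:

    #include <vector>
    int main() {
      std::vector<int> V;
      V.reserve(4); // capacity >= 4, but V.size() is still 0
      // V[0] = 1;  // undefined behavior: no element exists yet
      V.resize(4);  // four value-initialized elements now exist
      V[0] = 1;     // well-defined
    }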
diff --git a/src/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp b/src/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
index 1a9ae68..f22d834 100644
--- a/src/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -321,12 +321,9 @@
       llvm_unreachable("There are no other nary expression types.");
     }
     OS << "(";
-    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
-         I != E; ++I) {
-      OS << **I;
-      if (std::next(I) != E)
-        OS << OpStr;
-    }
+    ListSeparator LS(OpStr);
+    for (const SCEV *Op : NAry->operands())
+      OS << LS << *Op;
     OS << ")";
     switch (NAry->getSCEVType()) {
     case scAddExpr:
@@ -389,12 +386,14 @@
   case scSignExtend:
     return cast<SCEVCastExpr>(this)->getType();
   case scAddRecExpr:
+    return cast<SCEVAddRecExpr>(this)->getType();
   case scMulExpr:
+    return cast<SCEVMulExpr>(this)->getType();
   case scUMaxExpr:
   case scSMaxExpr:
   case scUMinExpr:
   case scSMinExpr:
-    return cast<SCEVNAryExpr>(this)->getType();
+    return cast<SCEVMinMaxExpr>(this)->getType();
   case scAddExpr:
     return cast<SCEVAddExpr>(this)->getType();
   case scUDivExpr:
@@ -534,8 +533,7 @@
             CE->getNumOperands() == 2)
           if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
             if (CI->isOne()) {
-              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
-                                 ->getElementType();
+              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
               return true;
             }
 
@@ -548,8 +546,7 @@
       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
         if (CE->getOpcode() == Instruction::GetElementPtr &&
             CE->getOperand(0)->isNullValue()) {
-          Type *Ty =
-            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
           if (StructType *STy = dyn_cast<StructType>(Ty))
             if (!STy->isPacked() &&
                 CE->getNumOperands() == 3 &&
@@ -575,8 +572,7 @@
             CE->getNumOperands() == 3 &&
             CE->getOperand(0)->isNullValue() &&
             CE->getOperand(1)->isNullValue()) {
-          Type *Ty =
-            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
           // Ignore vector types here so that ScalarEvolutionExpander doesn't
           // emit getelementptrs that index into vectors.
           if (Ty->isStructTy() || Ty->isArrayTy()) {
@@ -689,11 +685,13 @@
 // Return negative, zero, or positive, if LHS is less than, equal to, or greater
 // than RHS, respectively. A three-way result allows recursive comparisons to be
 // more efficient.
-static int CompareSCEVComplexity(
-    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
-    EquivalenceClasses<const Value *> &EqCacheValue,
-    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
-    DominatorTree &DT, unsigned Depth = 0) {
+// If the max analysis depth was reached, return None, since we cannot tell
+// for sure whether the two expressions are equivalent.
+static Optional<int>
+CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
+                      EquivalenceClasses<const Value *> &EqCacheValue,
+                      const LoopInfo *const LI, const SCEV *LHS,
+                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
   // Fast-path: SCEVs are uniqued so we can do a quick equality check.
   if (LHS == RHS)
     return 0;
@@ -703,8 +701,12 @@
   if (LType != RType)
     return (int)LType - (int)RType;
 
-  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
+  if (EqCacheSCEV.isEquivalent(LHS, RHS))
     return 0;
+
+  if (Depth > MaxSCEVCompareDepth)
+    return None;
+
   // Aside from the getSCEVType() ordering, the particular ordering
   // isn't very important except that it's beneficial to be consistent,
   // so that (a + b) and (b + a) don't end up as different expressions.
@@ -759,9 +761,9 @@
 
     // Lexicographically compare.
     for (unsigned i = 0; i != LNumOps; ++i) {
-      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
-                                    LA->getOperand(i), RA->getOperand(i), DT,
-                                    Depth + 1);
+      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
+                                     LA->getOperand(i), RA->getOperand(i), DT,
+                                     Depth + 1);
       if (X != 0)
         return X;
     }
@@ -784,9 +786,9 @@
       return (int)LNumOps - (int)RNumOps;
 
     for (unsigned i = 0; i != LNumOps; ++i) {
-      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
-                                    LC->getOperand(i), RC->getOperand(i), DT,
-                                    Depth + 1);
+      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
+                                     LC->getOperand(i), RC->getOperand(i), DT,
+                                     Depth + 1);
       if (X != 0)
         return X;
     }
@@ -799,8 +801,8 @@
     const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
 
     // Lexicographically compare udiv expressions.
-    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
-                                  RC->getLHS(), DT, Depth + 1);
+    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
+                                   RC->getLHS(), DT, Depth + 1);
     if (X != 0)
       return X;
     X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
@@ -818,9 +820,9 @@
     const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
 
     // Compare cast expressions by operand.
-    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
-                                  LC->getOperand(), RC->getOperand(), DT,
-                                  Depth + 1);
+    auto X =
+        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
+                              RC->getOperand(), DT, Depth + 1);
     if (X == 0)
       EqCacheSCEV.unionSets(LHS, RHS);
     return X;
@@ -847,19 +849,25 @@
 
   EquivalenceClasses<const SCEV *> EqCacheSCEV;
   EquivalenceClasses<const Value *> EqCacheValue;
+
+  // Whether LHS has provably less complexity than RHS.
+  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
+    auto Complexity =
+        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
+    return Complexity && *Complexity < 0;
+  };
   if (Ops.size() == 2) {
     // This is the common case, which also happens to be trivially simple.
     // Special case it.
     const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
-    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
+    if (IsLessComplex(RHS, LHS))
       std::swap(LHS, RHS);
     return;
   }
 
   // Do the rough sort by complexity.
   llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
-    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
-           0;
+    return IsLessComplex(LHS, RHS);
   });
 
   // Now that we are sorted by complexity, group elements of the same
@@ -1016,16 +1024,23 @@
 /// where BC(It, k) stands for binomial coefficient.
 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                 ScalarEvolution &SE) const {
-  const SCEV *Result = getStart();
-  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
+  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
+}
+
+const SCEV *
+SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
+                                    const SCEV *It, ScalarEvolution &SE) {
+  assert(!Operands.empty());
+  const SCEV *Result = Operands[0];
+  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     // The computation is correct in the face of overflow provided that the
     // multiplication is performed _after_ the evaluation of the binomial
     // coefficient.
-    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
+    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
     if (isa<SCEVCouldNotCompute>(Coeff))
       return Coeff;
 
-    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
+    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
   }
   return Result;
 }
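In formula form, the loop above evaluates the add recurrence at iteration It as

    Result = \sum_{k=0}^{n-1} Operands[k] \cdot \binom{It}{k}

where \binom{It}{k} = It(It-1)...(It-k+1) / k! is what BinomialCoefficient computes; as the in-loop comment notes, multiplying only after the coefficient has been evaluated keeps the result correct under wrapping arithmetic.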
@@ -1034,15 +1049,15 @@
 //                    SCEV Expression folder implementations
 //===----------------------------------------------------------------------===//
 
-const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty,
-                                             unsigned Depth) {
-  assert(Ty->isIntegerTy() && "Target type must be an integer type!");
-  assert(Depth <= 1 && "getPtrToIntExpr() should self-recurse at most once.");
+const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
+                                                     unsigned Depth) {
+  assert(Depth <= 1 &&
+         "getLosslessPtrToIntExpr() should self-recurse at most once.");
 
   // We could be called with an integer-typed operands during SCEV rewrites.
   // Since the operand is an integer already, just perform zext/trunc/self cast.
   if (!Op->getType()->isPointerTy())
-    return getTruncateOrZeroExtend(Op, Ty);
+    return Op;
 
   // What would be an ID for such a SCEV cast expression?
   FoldingSetNodeID ID;
@@ -1053,27 +1068,44 @@
 
   // Is there already an expression for such a cast?
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
-    return getTruncateOrZeroExtend(S, Ty);
+    return S;
+
+  // It isn't legal for optimizations to construct new ptrtoint expressions
+  // for non-integral pointers.
+  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
+    return getCouldNotCompute();
+
+  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
+
+  // We can only trivially model ptrtoint if SCEV's effective (integer) type
+  // is sufficiently wide to represent all possible pointer values.
+  // We could theoretically teach SCEV to truncate wider pointers, but
+  // that isn't implemented for now.
+  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
+      getDataLayout().getTypeSizeInBits(IntPtrTy))
+    return getCouldNotCompute();
 
   // If not, is this expression something we can't reduce any further?
-  if (isa<SCEVUnknown>(Op)) {
+  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
+    // Perform some basic constant folding. If the operand of the ptr2int cast
+    // is a null pointer, don't create a ptr2int SCEV expression (that will be
+    // left as-is), but produce a zero constant.
+    // NOTE: We could handle a more general case, but lack motivational cases.
+    if (isa<ConstantPointerNull>(U->getValue()))
+      return getZero(IntPtrTy);
+
     // Create an explicit cast node.
     // We can reuse the existing insert position since if we get here,
     // we won't have made any changes which would invalidate it.
-    Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());
-    assert(getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(
-               Op->getType())) == getDataLayout().getTypeSizeInBits(IntPtrTy) &&
-           "We can only model ptrtoint if SCEV's effective (integer) type is "
-           "sufficiently wide to represent all possible pointer values.");
     SCEV *S = new (SCEVAllocator)
         SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
     UniqueSCEVs.InsertNode(S, IP);
     addToLoopUseLists(S);
-    return getTruncateOrZeroExtend(S, Ty);
+    return S;
   }
 
-  assert(Depth == 0 &&
-         "getPtrToIntExpr() should not self-recurse for non-SCEVUnknown's.");
+  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
+                       "non-SCEVUnknown's.");
 
   // Otherwise, we've got some expression that is more complex than just a
   // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
@@ -1127,11 +1159,9 @@
     }
 
     const SCEV *visitUnknown(const SCEVUnknown *Expr) {
-      Type *ExprPtrTy = Expr->getType();
-      assert(ExprPtrTy->isPointerTy() &&
+      assert(Expr->getType()->isPointerTy() &&
              "Should only reach pointer-typed SCEVUnknown's.");
-      Type *ExprIntPtrTy = SE.getDataLayout().getIntPtrType(ExprPtrTy);
-      return SE.getPtrToIntExpr(Expr, ExprIntPtrTy, /*Depth=*/1);
+      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
     }
   };
 
@@ -1140,6 +1170,16 @@
   assert(IntOp->getType()->isIntegerTy() &&
          "We must have succeeded in sinking the cast, "
          "and ending up with an integer-typed expression!");
+  return IntOp;
+}
+
+const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
+  assert(Ty->isIntegerTy() && "Target type must be an integer type!");
+
+  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
+  if (isa<SCEVCouldNotCompute>(IntOp))
+    return IntOp;
+
   return getTruncateOrZeroExtend(IntOp, Ty);
 }
 
@@ -1149,6 +1189,7 @@
          "This is not a truncating conversion!");
   assert(isSCEVable(Ty) &&
          "This is not a conversion to a SCEVable type!");
+  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
   Ty = getEffectiveSCEVType(Ty);
 
   FoldingSetNodeID ID;
@@ -1538,6 +1579,7 @@
          "This is not an extending conversion!");
   assert(isSCEVable(Ty) &&
          "This is not a conversion to a SCEVable type!");
+  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
   Ty = getEffectiveSCEVType(Ty);
 
   // Fold if the operand is constant.
@@ -1840,6 +1882,7 @@
          "This is not an extending conversion!");
   assert(isSCEVable(Ty) &&
          "This is not a conversion to a SCEVable type!");
+  assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
   Ty = getEffectiveSCEVType(Ty);
 
   // Fold if the operand is constant.
@@ -2207,6 +2250,81 @@
   return Interesting;
 }
 
+bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
+                                      const SCEV *LHS, const SCEV *RHS) {
+  const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
+                                            SCEV::NoWrapFlags, unsigned);
+  switch (BinOp) {
+  default:
+    llvm_unreachable("Unsupported binary op");
+  case Instruction::Add:
+    Operation = &ScalarEvolution::getAddExpr;
+    break;
+  case Instruction::Sub:
+    Operation = &ScalarEvolution::getMinusSCEV;
+    break;
+  case Instruction::Mul:
+    Operation = &ScalarEvolution::getMulExpr;
+    break;
+  }
+
+  const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
+      Signed ? &ScalarEvolution::getSignExtendExpr
+             : &ScalarEvolution::getZeroExtendExpr;
+
+  // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
+  auto *NarrowTy = cast<IntegerType>(LHS->getType());
+  auto *WideTy =
+      IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
+
+  const SCEV *A = (this->*Extension)(
+      (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
+  const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0),
+                                     (this->*Extension)(RHS, WideTy, 0),
+                                     SCEV::FlagAnyWrap, 0);
+  return A == B;
+}
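A worked instance of the ext(LHS op RHS) == ext(LHS) op ext(RHS) test above: for an i8 add of two values both known to be 100, the narrow sum wraps to -56, so the sign-extended result is -56 in i16, while sext(100) + sext(100) computed in i16 is 200. The two SCEVs differ, so signed overflow is possible and the function returns false; when SCEV can instead prove both sides fold to the same expression, getStrengthenedNoWrapFlagsFromBinOp below uses that to add the missing nuw or nsw flag.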
+
+std::pair<SCEV::NoWrapFlags, bool /*Deduced*/>
+ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
+    const OverflowingBinaryOperator *OBO) {
+  SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
+
+  if (OBO->hasNoUnsignedWrap())
+    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
+  if (OBO->hasNoSignedWrap())
+    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
+
+  bool Deduced = false;
+
+  if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
+    return {Flags, Deduced};
+
+  if (OBO->getOpcode() != Instruction::Add &&
+      OBO->getOpcode() != Instruction::Sub &&
+      OBO->getOpcode() != Instruction::Mul)
+    return {Flags, Deduced};
+
+  const SCEV *LHS = getSCEV(OBO->getOperand(0));
+  const SCEV *RHS = getSCEV(OBO->getOperand(1));
+
+  if (!OBO->hasNoUnsignedWrap() &&
+      willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
+                      /* Signed */ false, LHS, RHS)) {
+    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
+    Deduced = true;
+  }
+
+  if (!OBO->hasNoSignedWrap() &&
+      willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
+                      /* Signed */ true, LHS, RHS)) {
+    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
+    Deduced = true;
+  }
+
+  return {Flags, Deduced};
+}
+
 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
 // `OldFlags' as can't-wrap behavior.  Infer a more aggressive set of
 // can't-overflow flags for the operation if possible.
@@ -2292,6 +2410,9 @@
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
            "SCEVAddExpr operand types don't match!");
+  unsigned NumPtrs = count_if(
+      Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
+  assert(NumPtrs <= 1 && "add has at most one pointer operand");
 #endif
 
   // Sort by complexity, this groups all similar expression types together.
@@ -2425,6 +2546,48 @@
     }
   }
 
+  if (Ops.size() == 2) {
+    // Check if we have an expression of the form ((X + C1) - C2), where C1
+    // and C2 can be folded in a way that allows retaining the wrapping flags
+    // of (X + C1).
+    const SCEV *A = Ops[0];
+    const SCEV *B = Ops[1];
+    auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
+    auto *C = dyn_cast<SCEVConstant>(A);
+    if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
+      auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
+      auto C2 = C->getAPInt();
+      SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;
+
+      APInt ConstAdd = C1 + C2;
+      auto AddFlags = AddExpr->getNoWrapFlags();
+      // Adding a smaller constant is NUW if the original AddExpr was NUW.
+      if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNUW) ==
+              SCEV::FlagNUW &&
+          ConstAdd.ule(C1)) {
+        PreservedFlags =
+            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
+      }
+
+      // Adding a constant with the same sign and small magnitude is NSW if
+      // the original AddExpr was NSW.
+      if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNSW) ==
+              SCEV::FlagNSW &&
+          C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
+          ConstAdd.abs().ule(C1.abs())) {
+        PreservedFlags =
+            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
+      }
+
+      if (PreservedFlags != SCEV::FlagAnyWrap) {
+        SmallVector<const SCEV *, 4> NewOps(AddExpr->op_begin(),
+                                            AddExpr->op_end());
+        NewOps[0] = getConstant(ConstAdd);
+        return getAddExpr(NewOps, PreservedFlags);
+      }
+    }
+  }
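A worked example of this fold, in i8: subtracting 3 from (X +nuw 5) reaches getAddExpr as the operand list {-3, (5 +nuw X)}, so C1 = 5, C2 = -3, and ConstAdd = 2. Since 2 ule 5, the rewritten expression adds a smaller unsigned constant than the original did, and (X +nuw 2) is produced with NUW retained instead of a flag-less reassociation.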
+
   // Skip past any other cast SCEVs.
   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
     ++Idx;
@@ -2432,6 +2595,10 @@
   // If there are add operands they would be next.
   if (Idx < Ops.size()) {
     bool DeletedAdd = false;
+    // If the original flags and all inlined SCEVAddExprs are NUW, use the
+    // common NUW flag for expression after inlining. Other flags cannot be
+    // preserved, because they may depend on the original order of operations.
+    SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
     while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
       if (Ops.size() > AddOpsInlineThreshold ||
           Add->getNumOperands() > AddOpsInlineThreshold)
@@ -2441,13 +2608,14 @@
       Ops.erase(Ops.begin()+Idx);
       Ops.append(Add->op_begin(), Add->op_end());
       DeletedAdd = true;
+      CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
     }
 
     // If we deleted at least one add, we added operands to the end of the list,
     // and they are not necessarily sorted.  Recurse to resort and resimplify
     // any operands we just acquired.
     if (DeletedAdd)
-      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
+      return getAddExpr(Ops, CommonFlags, Depth + 1);
   }
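Only NUW can survive this inlining because unsigned addition is association-independent: every operand is non-negative as an unsigned value, so if the full sum does not wrap, no partial sum over a subset can wrap either. NSW offers no such guarantee; in i8, (127 +nsw (1 +nsw -1)) never wraps signed, yet the regrouping ((127 + 1) + -1) wraps at the inner add, so signed flags tied to the original association must be dropped.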
 
   // Skip over the add expression until we get to a multiply.
@@ -2480,12 +2648,16 @@
       Ops.clear();
       if (AccumulatedConstant != 0)
         Ops.push_back(getConstant(AccumulatedConstant));
-      for (auto &MulOp : MulOpLists)
-        if (MulOp.first != 0)
+      for (auto &MulOp : MulOpLists) {
+        if (MulOp.first == 1) {
+          Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
+        } else if (MulOp.first != 0) {
           Ops.push_back(getMulExpr(
               getConstant(MulOp.first),
               getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
               SCEV::FlagAnyWrap, Depth + 1));
+        }
+      }
       if (Ops.empty())
         return getZero(Ty);
       if (Ops.size() == 1)
@@ -2804,9 +2976,10 @@
   assert(!Ops.empty() && "Cannot get empty mul!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
-  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
+  Type *ETy = Ops[0]->getType();
+  assert(!ETy->isPointerTy());
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
-    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
+    assert(Ops[i]->getType() == ETy &&
            "SCEVMulExpr operand types don't match!");
 #endif
 
@@ -3091,8 +3264,9 @@
 /// possible.
 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                          const SCEV *RHS) {
-  assert(getEffectiveSCEVType(LHS->getType()) ==
-         getEffectiveSCEVType(RHS->getType()) &&
+  assert(!LHS->getType()->isPointerTy() &&
+         "SCEVUDivExpr operand can't be pointer!");
+  assert(LHS->getType() == RHS->getType() &&
          "SCEVUDivExpr operand types don't match!");
 
   FoldingSetNodeID ID;
@@ -3103,6 +3277,11 @@
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
     return S;
 
+  // 0 udiv Y == 0
+  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
+    if (LHSC->getValue()->isZero())
+      return LHS;
+
   if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
     if (RHSC->getValue()->isOne())
       return LHS;                               // X udiv 1 --> x
@@ -3336,9 +3515,11 @@
   if (Operands.size() == 1) return Operands[0];
 #ifndef NDEBUG
   Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
+  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
            "SCEVAddRecExpr operand types don't match!");
+    assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
+  }
   for (unsigned i = 0, e = Operands.size(); i != e; ++i)
     assert(isLoopInvariant(Operands[i], L) &&
            "SCEVAddRecExpr operand is not loop-invariant!");
@@ -3486,20 +3667,19 @@
   return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
 }
 
-const SCEV *ScalarEvolution::getSignumExpr(const SCEV *Op) {
-  Type *Ty = Op->getType();
-  return getSMinExpr(getSMaxExpr(Op, getMinusOne(Ty)), getOne(Ty));
-}
-
 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
                                            SmallVectorImpl<const SCEV *> &Ops) {
   assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
   Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
-  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
+  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
     assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
            "Operand types don't match!");
+    assert(Ops[0]->getType()->isPointerTy() ==
+               Ops[i]->getType()->isPointerTy() &&
+           "min/max should be consistently pointerish");
+  }
 #endif
 
   bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
@@ -3807,7 +3987,7 @@
 
 /// Return the ValueOffsetPair set for \p S. \p S can be represented
 /// by the value and offset from any ValueOffsetPair in the set.
-SetVector<ScalarEvolution::ValueOffsetPair> *
+ScalarEvolution::ValueOffsetPairSetVector *
 ScalarEvolution::getSCEVValues(const SCEV *S) {
   ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
   if (SI == ExprValueMap.end())
@@ -3830,7 +4010,7 @@
   if (I != ValueExprMap.end()) {
     const SCEV *S = I->second;
     // Remove {V, 0} from the set of ExprValueMap[S]
-    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
+    if (auto *SV = getSCEVValues(S))
       SV->remove({V, nullptr});
 
     // Remove {V, Offset} from the set of ExprValueMap[Stripped]
@@ -3838,7 +4018,7 @@
     ConstantInt *Offset;
     std::tie(Stripped, Offset) = splitAddExpr(S);
     if (Offset != nullptr) {
-      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
+      if (auto *SV = getSCEVValues(Stripped))
         SV->remove({V, Offset});
     }
     ValueExprMap.erase(V);
@@ -3966,6 +4146,41 @@
   return getMinusSCEV(getMinusOne(Ty), V);
 }
 
+/// Compute an expression equivalent to S - getPointerBase(S).
+static const SCEV *removePointerBase(ScalarEvolution *SE, const SCEV *P) {
+  assert(P->getType()->isPointerTy());
+
+  if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
+    // The base of an AddRec is the first operand.
+    SmallVector<const SCEV *> Ops{AddRec->operands()};
+    Ops[0] = removePointerBase(SE, Ops[0]);
+    // Don't try to transfer nowrap flags for now. We could in some cases
+    // (for example, if pointer operand of the AddRec is a SCEVUnknown).
+    return SE->getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap);
+  }
+  if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
+    // The base of an Add is the pointer operand.
+    SmallVector<const SCEV *> Ops{Add->operands()};
+    const SCEV **PtrOp = nullptr;
+    for (const SCEV *&AddOp : Ops) {
+      if (AddOp->getType()->isPointerTy()) {
+        // If we find an Add with multiple pointer operands, treat it as a
+        // pointer base to be consistent with getPointerBase.  Eventually
+        // we should be able to assert this is impossible.
+        if (PtrOp)
+          return SE->getZero(P->getType());
+        PtrOp = &AddOp;
+      }
+    }
+    *PtrOp = removePointerBase(SE, *PtrOp);
+    // Don't try to transfer nowrap flags for now. We could in some cases
+    // (for example, if the pointer operand of the Add is a SCEVUnknown).
+    return SE->getAddExpr(Ops);
+  }
+  // Any other expression must be a pointer base.
+  return SE->getZero(P->getType());
+}
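For intuition, using SCEV's pretty-printed notation on a hypothetical example:

    //   LHS = (4 + %n + %base),   RHS = (8 + %base)
    //   removePointerBase(LHS) = (4 + %n)
    //   removePointerBase(RHS) = 8
    //   getMinusSCEV(LHS, RHS)  = (-4 + %n)

Subtracting two pointers with the same base thus reduces to subtracting their integer offsets, while pointers whose bases cannot be proven equal yield CouldNotCompute, as the code below implements.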
+
 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                           SCEV::NoWrapFlags Flags,
                                           unsigned Depth) {
@@ -3973,6 +4188,17 @@
   if (LHS == RHS)
     return getZero(LHS->getType());
 
+  // If we subtract two pointers with different pointer bases, bail.
+  // Eventually, we're going to add an assertion to getMulExpr that we
+  // can't multiply by a pointer.
+  if (RHS->getType()->isPointerTy()) {
+    if (!LHS->getType()->isPointerTy() ||
+        getPointerBase(LHS) != getPointerBase(RHS))
+      return getCouldNotCompute();
+    LHS = removePointerBase(this, LHS);
+    RHS = removePointerBase(this, RHS);
+  }
+
   // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
   // makes it so that we cannot make much use of NUW.
   auto AddFlags = SCEV::FlagAnyWrap;
@@ -4127,16 +4353,16 @@
     return V;
 
   while (true) {
-    if (const SCEVIntegralCastExpr *Cast = dyn_cast<SCEVIntegralCastExpr>(V)) {
-      V = Cast->getOperand();
-    } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
+    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
+      V = AddRec->getStart();
+    } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
       const SCEV *PtrOp = nullptr;
-      for (const SCEV *NAryOp : NAry->operands()) {
-        if (NAryOp->getType()->isPointerTy()) {
+      for (const SCEV *AddOp : Add->operands()) {
+        if (AddOp->getType()->isPointerTy()) {
           // Cannot find the base of an expression with multiple pointer ops.
           if (PtrOp)
             return V;
-          PtrOp = NAryOp;
+          PtrOp = AddOp;
         }
       }
       if (!PtrOp) // All operands were non-pointer.
@@ -4542,7 +4768,6 @@
   Value *RHS;
   bool IsNSW = false;
   bool IsNUW = false;
-  bool IsExact = false;
 
   /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
   /// constant expression.
@@ -4555,14 +4780,11 @@
       IsNSW = OBO->hasNoSignedWrap();
       IsNUW = OBO->hasNoUnsignedWrap();
     }
-    if (auto *PEO = dyn_cast<PossiblyExactOperator>(Op))
-      IsExact = PEO->isExact();
   }
 
   explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
-                    bool IsNUW = false, bool IsExact = false)
-      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW),
-        IsExact(IsExact) {}
+                    bool IsNUW = false)
+      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
 };
 
 } // end anonymous namespace
@@ -5429,48 +5651,57 @@
   switch (ICI->getPredicate()) {
   case ICmpInst::ICMP_SLT:
   case ICmpInst::ICMP_SLE:
-    std::swap(LHS, RHS);
-    LLVM_FALLTHROUGH;
-  case ICmpInst::ICMP_SGT:
-  case ICmpInst::ICMP_SGE:
-    // a >s b ? a+x : b+x  ->  smax(a, b)+x
-    // a >s b ? b+x : a+x  ->  smin(a, b)+x
-    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
-      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
-      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
-      const SCEV *LA = getSCEV(TrueVal);
-      const SCEV *RA = getSCEV(FalseVal);
-      const SCEV *LDiff = getMinusSCEV(LA, LS);
-      const SCEV *RDiff = getMinusSCEV(RA, RS);
-      if (LDiff == RDiff)
-        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
-      LDiff = getMinusSCEV(LA, RS);
-      RDiff = getMinusSCEV(RA, LS);
-      if (LDiff == RDiff)
-        return getAddExpr(getSMinExpr(LS, RS), LDiff);
-    }
-    break;
   case ICmpInst::ICMP_ULT:
   case ICmpInst::ICMP_ULE:
     std::swap(LHS, RHS);
     LLVM_FALLTHROUGH;
+  case ICmpInst::ICMP_SGT:
+  case ICmpInst::ICMP_SGE:
   case ICmpInst::ICMP_UGT:
   case ICmpInst::ICMP_UGE:
-    // a >u b ? a+x : b+x  ->  umax(a, b)+x
-    // a >u b ? b+x : a+x  ->  umin(a, b)+x
+    // a > b ? a+x : b+x  ->  max(a, b)+x
+    // a > b ? b+x : a+x  ->  min(a, b)+x
     if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
-      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
-      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
+      bool Signed = ICI->isSigned();
       const SCEV *LA = getSCEV(TrueVal);
       const SCEV *RA = getSCEV(FalseVal);
+      const SCEV *LS = getSCEV(LHS);
+      const SCEV *RS = getSCEV(RHS);
+      if (LA->getType()->isPointerTy()) {
+        // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
+        // Need to make sure we can't produce weird expressions involving
+        // negated pointers.
+        if (LA == LS && RA == RS)
+          return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS);
+        if (LA == RS && RA == LS)
+          return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS);
+      }
+      auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * {
+        if (Op->getType()->isPointerTy()) {
+          Op = getLosslessPtrToIntExpr(Op);
+          if (isa<SCEVCouldNotCompute>(Op))
+            return Op;
+        }
+        if (Signed)
+          Op = getNoopOrSignExtend(Op, I->getType());
+        else
+          Op = getNoopOrZeroExtend(Op, I->getType());
+        return Op;
+      };
+      LS = CoerceOperand(LS);
+      RS = CoerceOperand(RS);
+      if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
+        break;
       const SCEV *LDiff = getMinusSCEV(LA, LS);
       const SCEV *RDiff = getMinusSCEV(RA, RS);
       if (LDiff == RDiff)
-        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
+        return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS),
+                          LDiff);
       LDiff = getMinusSCEV(LA, RS);
       RDiff = getMinusSCEV(RA, LS);
       if (LDiff == RDiff)
-        return getAddExpr(getUMinExpr(LS, RS), LDiff);
+        return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS),
+                          LDiff);
     }
     break;
   case ICmpInst::ICMP_NE:
@@ -5516,8 +5747,8 @@
     return getUnknown(GEP);
 
   SmallVector<const SCEV *, 4> IndexExprs;
-  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
-    IndexExprs.push_back(getSCEV(*Index));
+  for (Value *Index : GEP->indices())
+    IndexExprs.push_back(getSCEV(Index));
   return getGEPExpr(GEP, IndexExprs);
 }
 
@@ -5628,6 +5859,125 @@
   }
 }
 
+ConstantRange ScalarEvolution::
+getRangeForUnknownRecurrence(const SCEVUnknown *U) {
+  const DataLayout &DL = getDataLayout();
+
+  unsigned BitWidth = getTypeSizeInBits(U->getType());
+  const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);
+
+  // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
+  // use information about the trip count to improve our available range.  Note
+  // that the trip count independent cases are already handled by known bits.
+  // WARNING: The definition of recurrence used here is subtly different than
+  // the one used by AddRec (and thus most of this file).  Step is allowed to
+  // be arbitrarily loop varying here, where AddRec allows only loop invariant
+  // and other addrecs in the same loop (for non-affine addrecs).  The code
+  // below intentionally handles the case where step is not loop invariant.
+  auto *P = dyn_cast<PHINode>(U->getValue());
+  if (!P)
+    return FullSet;
+
+  // Make sure that no Phi input comes from an unreachable block. Otherwise,
+  // even the values that are not available in these blocks may come from them,
+  // and this leads to false-positive recurrence test.
+  for (auto *Pred : predecessors(P->getParent()))
+    if (!DT.isReachableFromEntry(Pred))
+      return FullSet;
+
+  BinaryOperator *BO;
+  Value *Start, *Step;
+  if (!matchSimpleRecurrence(P, BO, Start, Step))
+    return FullSet;
+
+  // If we found a recurrence in reachable code, we must be in a loop. Note
+  // that BO might be in some subloop of L, and that's completely okay.
+  auto *L = LI.getLoopFor(P->getParent());
+  assert(L && L->getHeader() == P->getParent());
+  if (!L->contains(BO->getParent()))
+    // NOTE: This bailout should be an assert instead.  However, asserting
+    // the condition here exposes a case where LoopFusion is querying SCEV
+    // with malformed loop information in the midst of the transform.
+    // There doesn't appear to be an obvious fix, so for the moment bail
+    // out until the caller issue can be fixed.  PR49566 tracks the bug.
+    return FullSet;
+
+  // TODO: Extend to other opcodes, such as mul and div.
+  switch (BO->getOpcode()) {
+  default:
+    return FullSet;
+  case Instruction::AShr:
+  case Instruction::LShr:
+  case Instruction::Shl:
+    break;
+  }
+
+  if (BO->getOperand(0) != P)
+    // TODO: Handle the power function forms some day.
+    return FullSet;
+
+  unsigned TC = getSmallConstantMaxTripCount(L);
+  if (!TC || TC >= BitWidth)
+    return FullSet;
+
+  auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
+  auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
+  assert(KnownStart.getBitWidth() == BitWidth &&
+         KnownStep.getBitWidth() == BitWidth);
+
+  // Compute total shift amount, being careful of overflow and bitwidths.
+  auto MaxShiftAmt = KnownStep.getMaxValue();
+  APInt TCAP(BitWidth, TC-1);
+  bool Overflow = false;
+  auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
+  if (Overflow)
+    return FullSet;
+
+  switch (BO->getOpcode()) {
+  default:
+    llvm_unreachable("filtered out above");
+  case Instruction::AShr: {
+    // For each ashr, three cases:
+    //   shift = 0 => unchanged value
+    //   saturation => 0 or -1
+    //   other => a value closer to zero (of the same sign)
+    // Thus, the end value is closer to zero than the start.
+    auto KnownEnd = KnownBits::ashr(KnownStart,
+                                    KnownBits::makeConstant(TotalShift));
+    if (KnownStart.isNonNegative())
+      // Analogous to lshr (simply not yet canonicalized)
+      return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
+                                        KnownStart.getMaxValue() + 1);
+    if (KnownStart.isNegative())
+      // End >=u Start && End <=s Start
+      return ConstantRange::getNonEmpty(KnownStart.getMinValue(),
+                                        KnownEnd.getMaxValue() + 1);
+    break;
+  }
+  case Instruction::LShr: {
+    // For each lshr, three cases:
+    //   shift = 0 => unchanged value
+    //   saturation => 0
+    //   other => a smaller positive number
+    // Thus, the low end of the unsigned range is the last value produced.
+    auto KnownEnd = KnownBits::lshr(KnownStart,
+                                    KnownBits::makeConstant(TotalShift));
+    return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
+                                      KnownStart.getMaxValue() + 1);
+  }
+  case Instruction::Shl: {
+    // Iff no bits are shifted out, value increases on every shift.
+    auto KnownEnd = KnownBits::shl(KnownStart,
+                                   KnownBits::makeConstant(TotalShift));
+    if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
+      return ConstantRange(KnownStart.getMinValue(),
+                           KnownEnd.getMaxValue() + 1);
+    break;
+  }
+  }
+  return FullSet;
+}
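A small example of the shift-recurrence reasoning, in C-like form (hypothetical, not taken from this patch):

    unsigned X = Start;              // known bits give Start <= 255
    for (unsigned I = 0; I < N; ++I) // max trip count proven to be 4
      X = X >> 1;                    // lshr recurrence, constant step 1

The phi value is observed after at most TC - 1 = 3 shifts, so TotalShift is 1 * 3, and the lshr case above yields the unsigned range [min(Start) >> 3, 255].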
+
 /// Determine the range for a particular SCEV.  If SignHint is
 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
 /// with a "cleaner" unsigned (resp. signed) representation.
@@ -5828,46 +6178,55 @@
   }
 
   if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
     // Check if the IR explicitly contains !range metadata.
     Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
     if (MDRange.hasValue())
       ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                             RangeType);
 
-    // Split here to avoid paying the compile-time cost of calling both
-    // computeKnownBits and ComputeNumSignBits.  This restriction can be lifted
-    // if needed.
+    // Use facts about recurrences in the underlying IR.  Note that add
+    // recurrences are AddRecExprs and thus don't hit this path.  This
+    // primarily handles shift recurrences.
+    auto CR = getRangeForUnknownRecurrence(U);
+    ConservativeResult = ConservativeResult.intersectWith(CR);
+
+    // See if ValueTracking can give us a useful range.
     const DataLayout &DL = getDataLayout();
-    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
-      // For a SCEVUnknown, ask ValueTracking.
-      KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
-      if (Known.getBitWidth() != BitWidth)
-        Known = Known.zextOrTrunc(BitWidth);
-      // If Known does not result in full-set, intersect with it.
-      if (Known.getMinValue() != Known.getMaxValue() + 1)
-        ConservativeResult = ConservativeResult.intersectWith(
-            ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
-            RangeType);
-    } else {
-      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
-             "generalize as needed!");
-      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
+    KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
+    if (Known.getBitWidth() != BitWidth)
+      Known = Known.zextOrTrunc(BitWidth);
+
+    // ValueTracking may be able to compute a tighter result for the number of
+    // sign bits than for the value of those sign bits.
+    unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
+    if (U->getType()->isPointerTy()) {
       // If the pointer size is larger than the index size type, this can cause
       // NS to be larger than BitWidth. So compensate for this.
-      if (U->getType()->isPointerTy()) {
-        unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
-        int ptrIdxDiff = ptrSize - BitWidth;
-        if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
-          NS -= ptrIdxDiff;
-      }
-
-      if (NS > 1)
-        ConservativeResult = ConservativeResult.intersectWith(
-            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
-                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
-            RangeType);
+      unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
+      int ptrIdxDiff = ptrSize - BitWidth;
+      if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
+        NS -= ptrIdxDiff;
     }
 
+    if (NS > 1) {
+      // If we know any of the sign bits, we know all of the sign bits.
+      if (!Known.Zero.getHiBits(NS).isNullValue())
+        Known.Zero.setHighBits(NS);
+      if (!Known.One.getHiBits(NS).isNullValue())
+        Known.One.setHighBits(NS);
+    }
+
+    if (Known.getMinValue() != Known.getMaxValue() + 1)
+      ConservativeResult = ConservativeResult.intersectWith(
+          ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
+          RangeType);
+    if (NS > 1)
+      ConservativeResult = ConservativeResult.intersectWith(
+          ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
+                        APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
+          RangeType);
+
     // A range of Phi is a subset of union of all ranges of its input.
     if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
       // Make sure that we do not run over cycled Phis.
@@ -6318,7 +6677,7 @@
       if (auto *SI = dyn_cast<StoreInst>(I))
         return !SI->isSimple();
 
-      return I->mayHaveSideEffects();
+      return I->mayThrow() || I->mayWriteToMemory();
     };
 
     LoopProperties LP = {/* HasNoAbnormalExits */ true,
@@ -6342,6 +6701,13 @@
   return Itr->second;
 }
 
+bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
+  // A mustprogress loop without side effects must be finite.
+  // TODO: The check used here is very conservative.  Only *specific*
+  // side effects are well defined in infinite loops.
+  return isMustProgress(L) && loopHasNoSideEffects(L);
+}
+
 const SCEV *ScalarEvolution::createSCEV(Value *V) {
   if (!isSCEVable(V->getType()))
     return getUnknown(V);
@@ -6355,9 +6721,6 @@
       return getUnknown(UndefValue::get(V->getType()));
   } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
     return getConstant(CI);
-  else if (isa<ConstantPointerNull>(V))
-    // FIXME: we shouldn't special-case null pointer constant.
-    return getZero(V->getType());
   else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
     return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
   else if (!isa<ConstantExpr>(V))
@@ -6647,15 +7010,6 @@
           }
         }
       }
-      if (BO->IsExact) {
-        // Given exact arithmetic in-bounds right-shift by a constant,
-        // we can lower it into:  (abs(x) EXACT/u (1<<C)) * signum(x)
-        const SCEV *X = getSCEV(BO->LHS);
-        const SCEV *AbsX = getAbsExpr(X, /*IsNSW=*/false);
-        APInt Mult = APInt::getOneBitSet(BitWidth, AShrAmt);
-        const SCEV *Div = getUDivExactExpr(AbsX, getConstant(Mult));
-        return getMulExpr(Div, getSignumExpr(X), SCEV::FlagNSW);
-      }
       break;
     }
     }
@@ -6694,22 +7048,14 @@
 
   case Instruction::PtrToInt: {
     // Pointer to integer cast is straight-forward, so do model it.
-    Value *Ptr = U->getOperand(0);
-    const SCEV *Op = getSCEV(Ptr);
+    const SCEV *Op = getSCEV(U->getOperand(0));
     Type *DstIntTy = U->getType();
-    // SCEV doesn't have constant pointer expression type, but it supports
-    // nullptr constant (and only that one), which is modelled in SCEV as a
-    // zero integer constant. So just skip the ptrtoint cast for constants.
-    if (isa<SCEVConstant>(Op))
-      return getTruncateOrZeroExtend(Op, DstIntTy);
-    Type *PtrTy = Ptr->getType();
-    Type *IntPtrTy = getDataLayout().getIntPtrType(PtrTy);
     // But only if effective SCEV (integer) type is wide enough to represent
     // all possible pointer values.
-    if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(PtrTy)) !=
-        getDataLayout().getTypeSizeInBits(IntPtrTy))
+    const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
+    if (isa<SCEVCouldNotCompute>(IntOp))
       return getUnknown(V);
-    return getPtrToIntExpr(Op, DstIntTy);
+    return IntOp;
   }
   case Instruction::IntToPtr:
     // Just don't deal with inttoptr casts.
@@ -6798,6 +7144,12 @@
 //                   Iteration Count Computation Code
 //
 
+const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) {
+  // Get the trip count from the BE count by adding 1.  Overflow results
+  // in zero, which means "unknown".
+  return getAddExpr(ExitCount, getOne(ExitCount->getType()));
+}
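Numerically: a backedge-taken count of 3 means the backedge is followed 3 times and the body runs 4 times, so the trip count is ExitCount + 1; if the backedge-taken count is all-ones (i.e. -1), the add wraps to 0, which callers such as getConstantTripCount treat as "unknown".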
+
 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
   if (!ExitCount)
     return 0;
@@ -6813,11 +7165,8 @@
 }
 
 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
-  if (BasicBlock *ExitingBB = L->getExitingBlock())
-    return getSmallConstantTripCount(L, ExitingBB);
-
-  // No trip count information for multiple exits.
-  return 0;
+  auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
+  return getConstantTripCount(ExitCount);
 }
 
 unsigned
@@ -6838,11 +7187,45 @@
 }
 
 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
-  if (BasicBlock *ExitingBB = L->getExitingBlock())
-    return getSmallConstantTripMultiple(L, ExitingBB);
+  SmallVector<BasicBlock *, 8> ExitingBlocks;
+  L->getExitingBlocks(ExitingBlocks);
 
-  // No trip multiple information for multiple exits.
-  return 0;
+  Optional<unsigned> Res = None;
+  for (auto *ExitingBB : ExitingBlocks) {
+    unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
+    if (!Res)
+      Res = Multiple;
+    Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
+  }
+  return Res.getValueOr(1);
+}
+
+unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
+                                                       const SCEV *ExitCount) {
+  if (ExitCount == getCouldNotCompute())
+    return 1;
+
+  // Get the trip count.
+  const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
+
+  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
+  if (!TC)
+    // Attempt to factor more general cases. Returns the greatest power of
+    // two divisor. If overflow happens, the trip count expression is still
+    // divisible by the greatest power of 2 divisor returned.
+    return 1U << std::min((uint32_t)31,
+                          GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
+
+  ConstantInt *Result = TC->getValue();
+
+  // Guard against huge trip counts (this requires checking
+  // for zero to handle the case where the trip count == -1 and the
+  // addition wraps).
+  if (!Result || Result->getValue().getActiveBits() > 32 ||
+      Result->getValue().getActiveBits() == 0)
+    return 1;
+
+  return (unsigned)Result->getZExtValue();
 }
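Two worked cases for the trip-multiple logic above: if two exiting blocks give constant trip multiples 12 and 8, the loop-level overload returns gcd(12, 8) = 4; and for a non-constant trip count expression such as (8 * %n), GetMinTrailingZeros reports three trailing zero bits, so the factored multiple is 1 << 3 = 8, which remains a valid divisor even if the trip count computation wraps.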
 
 /// Returns the largest constant divisor of the trip count of this loop as a
@@ -6864,29 +7247,7 @@
   assert(L->isLoopExiting(ExitingBlock) &&
          "Exiting block must actually branch out of the loop!");
   const SCEV *ExitCount = getExitCount(L, ExitingBlock);
-  if (ExitCount == getCouldNotCompute())
-    return 1;
-
-  // Get the trip count from the BE count by adding 1.
-  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));
-
-  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
-  if (!TC)
-    // Attempt to factor more general cases. Returns the greatest power of
-    // two divisor. If overflow happens, the trip count expression is still
-    // divisible by the greatest power of 2 divisor returned.
-    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
-
-  ConstantInt *Result = TC->getValue();
-
-  // Guard against huge trip counts (this requires checking
-  // for zero to handle the case where the trip count == -1 and the
-  // addition wraps).
-  if (!Result || Result->getValue().getActiveBits() > 32 ||
-      Result->getValue().getActiveBits() == 0)
-    return 1;
-
-  return (unsigned)Result->getZExtValue();
+  return getSmallConstantTripMultiple(L, ExitCount);
 }
 
 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
@@ -7077,16 +7438,6 @@
 }
 
 void ScalarEvolution::forgetLoop(const Loop *L) {
-  // Drop any stored trip count value.
-  auto RemoveLoopFromBackedgeMap =
-      [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
-        auto BTCPos = Map.find(L);
-        if (BTCPos != Map.end()) {
-          BTCPos->second.clear();
-          Map.erase(BTCPos);
-        }
-      };
-
   SmallVector<const Loop *, 16> LoopWorklist(1, L);
   SmallVector<Instruction *, 32> Worklist;
   SmallPtrSet<Instruction *, 16> Visited;
@@ -7095,8 +7446,9 @@
   while (!LoopWorklist.empty()) {
     auto *CurrL = LoopWorklist.pop_back_val();
 
-    RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
-    RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);
+    // Drop any stored trip count value.
+    BackedgeTakenCounts.erase(CurrL);
+    PredicatedBackedgeTakenCounts.erase(CurrL);
 
     // Drop information about predicated SCEV rewrites for this loop.
     for (auto I = PredicatedSCEVRewrites.begin();
@@ -7271,25 +7623,12 @@
   return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
 }
 
-bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
-                                                    ScalarEvolution *SE) const {
-  if (getConstantMax() && getConstantMax() != SE->getCouldNotCompute() &&
-      SE->hasOperand(getConstantMax(), S))
-    return true;
-
-  for (auto &ENT : ExitNotTaken)
-    if (ENT.ExactNotTaken != SE->getCouldNotCompute() &&
-        SE->hasOperand(ENT.ExactNotTaken, S))
-      return true;
-
-  return false;
+bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S) const {
+  return Operands.contains(S);
 }
 
 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
-    : ExactNotTaken(E), MaxNotTaken(E) {
-  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
-          isa<SCEVConstant>(MaxNotTaken)) &&
-         "No point in having a non-constant max backedge taken count!");
+    : ExitLimit(E, E, false, None) {
 }
 
 ScalarEvolution::ExitLimit::ExitLimit(
@@ -7305,25 +7644,36 @@
   for (auto *PredSet : PredSetList)
     for (auto *P : *PredSet)
       addPredicate(P);
+  assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
+         "Backedge count should be int");
+  assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) &&
+         "Max backedge count should be int");
 }
 
 ScalarEvolution::ExitLimit::ExitLimit(
     const SCEV *E, const SCEV *M, bool MaxOrZero,
     const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
     : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
-  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
-          isa<SCEVConstant>(MaxNotTaken)) &&
-         "No point in having a non-constant max backedge taken count!");
 }
 
 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
                                       bool MaxOrZero)
     : ExitLimit(E, M, MaxOrZero, None) {
-  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
-          isa<SCEVConstant>(MaxNotTaken)) &&
-         "No point in having a non-constant max backedge taken count!");
 }
 
+class SCEVRecordOperands {
+  SmallPtrSetImpl<const SCEV *> &Operands;
+
+public:
+  SCEVRecordOperands(SmallPtrSetImpl<const SCEV *> &Operands)
+    : Operands(Operands) {}
+  bool follow(const SCEV *S) {
+    Operands.insert(S);
+    return true;
+  }
+  bool isDone() { return false; }
+};
+
 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
 /// computable exit into a persistent ExitNotTakenInfo array.
 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
@@ -7352,11 +7702,14 @@
   assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
           isa<SCEVConstant>(ConstantMax)) &&
          "No point in having a non-constant max backedge taken count!");
-}
 
-/// Invalidate this result and free the ExitNotTakenInfo array.
-void ScalarEvolution::BackedgeTakenInfo::clear() {
-  ExitNotTaken.clear();
+  SCEVRecordOperands RecordOperands(Operands);
+  SCEVTraversal<SCEVRecordOperands> ST(RecordOperands);
+  if (!isa<SCEVCouldNotCompute>(ConstantMax))
+    ST.visitAll(ConstantMax);
+  for (auto &ENT : ExitNotTaken)
+    if (!isa<SCEVCouldNotCompute>(ENT.ExactNotTaken))
+      ST.visitAll(ENT.ExactNotTaken);
 }
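
With the Operands set populated at construction, hasOperand becomes a single hash-set lookup instead of a recursive search. The follow/isDone visitor protocol that SCEVTraversal drives is sketched below over a toy expression type; Expr and recordOperands are assumptions for illustration, not LLVM types:

    #include <unordered_set>
    #include <vector>

    struct Expr { std::vector<const Expr *> Ops; };  // toy stand-in for const SCEV *

    // Record every node reachable from Root, mirroring how the
    // constructor above populates its Operands set.
    static void recordOperands(const Expr *Root,
                               std::unordered_set<const Expr *> &Operands) {
      std::vector<const Expr *> Worklist{Root};
      while (!Worklist.empty()) {
        const Expr *E = Worklist.back();
        Worklist.pop_back();
        if (!Operands.insert(E).second)
          continue;                    // already visited; don't re-follow
        for (const Expr *Op : E->Ops)
          Worklist.push_back(Op);
      }
    }
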
 
 /// Compute the number of times the backedge of the specified loop will execute.
@@ -7720,6 +8073,16 @@
   switch (Pred) {
   case ICmpInst::ICMP_NE: {                     // while (X != Y)
     // Convert to: while (X-Y != 0)
+    if (LHS->getType()->isPointerTy()) {
+      LHS = getLosslessPtrToIntExpr(LHS);
+      if (isa<SCEVCouldNotCompute>(LHS))
+        return LHS;
+    }
+    if (RHS->getType()->isPointerTy()) {
+      RHS = getLosslessPtrToIntExpr(RHS);
+      if (isa<SCEVCouldNotCompute>(RHS))
+        return RHS;
+    }
     ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
                                 AllowPredicates);
     if (EL.hasAnyInfo()) return EL;
@@ -7727,6 +8090,16 @@
   }
   case ICmpInst::ICMP_EQ: {                     // while (X == Y)
     // Convert to: while (X-Y == 0)
+    if (LHS->getType()->isPointerTy()) {
+      LHS = getLosslessPtrToIntExpr(LHS);
+      if (isa<SCEVCouldNotCompute>(LHS))
+        return LHS;
+    }
+    if (RHS->getType()->isPointerTy()) {
+      RHS = getLosslessPtrToIntExpr(RHS);
+      if (isa<SCEVCouldNotCompute>(RHS))
+        return RHS;
+    }
     ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
     if (EL.hasAnyInfo()) return EL;
     break;
@@ -7843,9 +8216,10 @@
   Idx = getSCEVAtScope(Idx, L);
 
   // We can only recognize very limited forms of loop index expressions, in
-  // particular, only affine AddRec's like {C1,+,C2}.
+  // particular, only affine AddRec's like {C1,+,C2}<L>.
   const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
-  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
+  if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() ||
+      isLoopInvariant(IdxExpr, L) ||
       !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
       !isa<SCEVConstant>(IdxExpr->getOperand(1)))
     return getCouldNotCompute();
@@ -7982,7 +8356,7 @@
     // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
     // bitwidth(K) iterations.
     Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
-    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
+    KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
                                        Predecessor->getTerminator(), &DT);
     auto *Ty = cast<IntegerType>(RHS->getType());
     if (Known.isNonNegative())
@@ -8418,13 +8792,12 @@
         if (C2->getType()->isPointerTy())
           return nullptr;
 
-        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
-          if (PTy->getElementType()->isStructTy())
-            C2 = ConstantExpr::getIntegerCast(
-                C2, Type::getInt32Ty(C->getContext()), true);
-          C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
-        } else
+        if (C->getType()->isPointerTy()) {
+          C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
+                                             C, C2);
+        } else {
           C = ConstantExpr::getAdd(C, C2);
+        }
       }
       return C;
     }
@@ -9145,10 +9518,15 @@
       loopHasNoAbnormalExits(AddRec->getLoop())) {
     const SCEV *Exact =
         getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
-    const SCEV *Max =
-        Exact == getCouldNotCompute()
-            ? Exact
-            : getConstant(getUnsignedRangeMax(Exact));
+    const SCEV *Max = getCouldNotCompute();
+    if (Exact != getCouldNotCompute()) {
+      APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
+      APInt BaseMaxInt = getUnsignedRangeMax(Exact);
+      if (BaseMaxInt.ult(MaxInt))
+        Max = getConstant(BaseMaxInt);
+      else
+        Max = getConstant(MaxInt);
+    }
     return ExitLimit(Exact, Max, false, Predicates);
   }
 
@@ -9440,7 +9818,7 @@
 }
 
 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
-  return isKnownNegative(S) || isKnownPositive(S);
+  return getUnsignedRangeMin(S) != 0;
 }
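
The unsigned-range formulation subsumes the old sign-based test: a value is known non-zero whenever the minimum of its unsigned range is non-zero, even when its sign is unknown. A toy demonstration, where URange is an assumed stand-in for ConstantRange:

    #include <cassert>
    #include <cstdint>

    struct URange { uint64_t Min, Max; };  // inclusive, non-wrapping for simplicity

    static bool isKnownNonZeroRange(URange R) { return R.Min != 0; }

    int main() {
      // Something like "x | 1" has unsigned range [1, UINT64_MAX]: provably
      // non-zero although neither "known positive" nor "known negative".
      assert(isKnownNonZeroRange({1, UINT64_MAX}));
      assert(!isKnownNonZeroRange({0, 7}));
    }
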
 
 std::pair<const SCEV *, const SCEV *>
@@ -9521,6 +9899,16 @@
   return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
 }
 
+Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
+                                                  const SCEV *LHS,
+                                                  const SCEV *RHS) {
+  if (isKnownPredicate(Pred, LHS, RHS))
+    return true;
+  else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
+    return false;
+  return None;
+}
+
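
evaluatePredicate packages the two one-sided queries into a tri-state answer, letting callers distinguish "provably false" from "unknown". The same pattern in miniature, with a deliberately trivial prove() oracle standing in for isKnownPredicate (all names here are illustrative):

    #include <optional>

    enum class Pred { SLT, SGE };

    static Pred inverse(Pred P) { return P == Pred::SLT ? Pred::SGE : Pred::SLT; }

    // Stand-in oracle; a real one may fail to prove either direction.
    static bool prove(Pred P, int L, int R) {
      return P == Pred::SLT ? L < R : L >= R;
    }

    // true: always holds; false: never holds; nullopt: unknown.
    static std::optional<bool> evaluate(Pred P, int L, int R) {
      if (prove(P, L, R))
        return true;
      if (prove(inverse(P), L, R))
        return false;
      return std::nullopt;
    }
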
 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS,
                                          const Instruction *Context) {
@@ -9529,6 +9917,23 @@
          isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
 }
 
+Optional<bool>
+ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
+                                     const SCEV *RHS,
+                                     const Instruction *Context) {
+  Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
+  if (KnownWithoutContext)
+    return KnownWithoutContext;
+
+  if (isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS))
+    return true;
+  else if (isBasicBlockEntryGuardedByCond(Context->getParent(),
+                                          ICmpInst::getInversePredicate(Pred),
+                                          LHS, RHS))
+    return false;
+  return None;
+}
+
 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                               const SCEVAddRecExpr *LHS,
                                               const SCEV *RHS) {
@@ -9721,10 +10126,9 @@
   // This code is split out from isKnownPredicate because it is called from
   // within isLoopEntryGuardedByCond.
 
-  auto CheckRanges =
-      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
-    return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
-        .contains(RangeLHS);
+  auto CheckRanges = [&](const ConstantRange &RangeLHS,
+                         const ConstantRange &RangeRHS) {
+    return RangeLHS.icmp(Pred, RangeRHS);
   };
 
   // The check at the top of the function catches the case where the values are
@@ -9732,10 +10136,13 @@
   if (Pred == CmpInst::ICMP_EQ)
     return false;
 
-  if (Pred == CmpInst::ICMP_NE)
-    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
-           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
-           isKnownNonZero(getMinusSCEV(LHS, RHS));
+  if (Pred == CmpInst::ICMP_NE) {
+    if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
+        CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)))
+      return true;
+    auto *Diff = getMinusSCEV(LHS, RHS);
+    return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
+  }
 
   if (CmpInst::isSigned(Pred))
     return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
@@ -9746,23 +10153,48 @@
 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS) {
-  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
-  // Return Y via OutY.
-  auto MatchBinaryAddToConst =
-      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
-             SCEV::NoWrapFlags ExpectedFlags) {
-    const SCEV *NonConstOp, *ConstOp;
-    SCEV::NoWrapFlags FlagsPresent;
+  // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
+  // C1 and C2 are constant integers. If either X or Y is not an add
+  // expression, treat it as X + 0 or Y + 0 respectively. C1 and C2 are
+  // returned via OutC1 and OutC2.
+  auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y,
+                                      APInt &OutC1, APInt &OutC2,
+                                      SCEV::NoWrapFlags ExpectedFlags) {
+    const SCEV *XNonConstOp, *XConstOp;
+    const SCEV *YNonConstOp, *YConstOp;
+    SCEV::NoWrapFlags XFlagsPresent;
+    SCEV::NoWrapFlags YFlagsPresent;
 
-    if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
-        !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
+    if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) {
+      XConstOp = getZero(X->getType());
+      XNonConstOp = X;
+      XFlagsPresent = ExpectedFlags;
+    }
+    if (!isa<SCEVConstant>(XConstOp) ||
+        (XFlagsPresent & ExpectedFlags) != ExpectedFlags)
       return false;
 
-    OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
-    return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
+    if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) {
+      YConstOp = getZero(Y->getType());
+      YNonConstOp = Y;
+      YFlagsPresent = ExpectedFlags;
+    }
+
+    if (!isa<SCEVConstant>(YConstOp) ||
+        (YFlagsPresent & ExpectedFlags) != ExpectedFlags)
+      return false;
+
+    if (YNonConstOp != XNonConstOp)
+      return false;
+
+    OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
+    OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
+
+    return true;
   };
 
-  APInt C;
+  APInt C1;
+  APInt C2;
 
   switch (Pred) {
   default:
@@ -9772,45 +10204,38 @@
     std::swap(LHS, RHS);
     LLVM_FALLTHROUGH;
   case ICmpInst::ICMP_SLE:
-    // X s<= (X + C)<nsw> if C >= 0
-    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
+    // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
+    if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
       return true;
 
-    // (X + C)<nsw> s<= X if C <= 0
-    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
-        !C.isStrictlyPositive())
-      return true;
     break;
 
   case ICmpInst::ICMP_SGT:
     std::swap(LHS, RHS);
     LLVM_FALLTHROUGH;
   case ICmpInst::ICMP_SLT:
-    // X s< (X + C)<nsw> if C > 0
-    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
-        C.isStrictlyPositive())
+    // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
+    if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
       return true;
 
-    // (X + C)<nsw> s< X if C < 0
-    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
-      return true;
     break;
 
   case ICmpInst::ICMP_UGE:
     std::swap(LHS, RHS);
     LLVM_FALLTHROUGH;
   case ICmpInst::ICMP_ULE:
-    // X u<= (X + C)<nuw> for any C
-    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW))
+    // (X + C1)<nuw> u<= (X + C2)<nuw> if C1 u<= C2.
+    if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2))
       return true;
+
     break;
 
   case ICmpInst::ICMP_UGT:
     std::swap(LHS, RHS);
     LLVM_FALLTHROUGH;
   case ICmpInst::ICMP_ULT:
-    // X u< (X + C)<nuw> if C != 0
-    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNUW) && !C.isNullValue())
+    // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2.
+    if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2))
       return true;
     break;
   }
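
Every case above reduces a comparison of (X + C1) against (X + C2) to a comparison of the constants, which is sound exactly because the nsw/nuw flags rule out wrapping. A brute-force 8-bit check of the signed variant (an illustrative sketch, not the SCEV code):

    #include <cassert>
    #include <cstdint>

    // Does a + b stay within int8_t range (the "nsw" condition)?
    static bool addNSW8(int a, int b, int8_t &out) {
      int wide = a + b;
      out = (int8_t)wide;              // value as an 8-bit register would hold it
      return wide >= -128 && wide <= 127;
    }

    int main() {
      for (int x = -128; x <= 127; ++x)
        for (int c1 = -128; c1 <= 127; ++c1)
          for (int c2 = -128; c2 <= 127; ++c2) {
            int8_t l, r;
            if (addNSW8(x, c1, l) && addNSW8(x, c2, r))
              assert((l < r) == (c1 < c2));  // nsw => compare the constants
          }
    }
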
@@ -9978,9 +10403,6 @@
     assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
            "This cannot be done on broken IR!");
 
-  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
-    return true;
-
   // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
   // the facts (a >= b && a != b) separately. A typical situation is when the
   // non-strict comparison is known from ranges and non-equality is known from
@@ -9991,13 +10413,23 @@
   bool ProvedNonStrictComparison = false;
   bool ProvedNonEquality = false;
 
-  if (ProvingStrictComparison) {
-    ProvedNonStrictComparison =
-        isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
-    ProvedNonEquality =
-        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
+  auto SplitAndProve =
+    [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool {
+    if (!ProvedNonStrictComparison)
+      ProvedNonStrictComparison = Fn(NonStrictPredicate);
+    if (!ProvedNonEquality)
+      ProvedNonEquality = Fn(ICmpInst::ICMP_NE);
     if (ProvedNonStrictComparison && ProvedNonEquality)
       return true;
+    return false;
+  };
+
+  if (ProvingStrictComparison) {
+    auto ProofFn = [&](ICmpInst::Predicate P) {
+      return isKnownViaNonRecursiveReasoning(P, LHS, RHS);
+    };
+    if (SplitAndProve(ProofFn))
+      return true;
   }
 
   // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
@@ -10005,13 +10437,10 @@
     if (isImpliedViaGuard(Block, Pred, LHS, RHS))
       return true;
     if (ProvingStrictComparison) {
-      if (!ProvedNonStrictComparison)
-        ProvedNonStrictComparison =
-            isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
-      if (!ProvedNonEquality)
-        ProvedNonEquality =
-            isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
-      if (ProvedNonStrictComparison && ProvedNonEquality)
+      auto ProofFn = [&](ICmpInst::Predicate P) {
+        return isImpliedViaGuard(Block, P, LHS, RHS);
+      };
+      if (SplitAndProve(ProofFn))
         return true;
     }
     return false;
@@ -10023,13 +10452,10 @@
     if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
       return true;
     if (ProvingStrictComparison) {
-      if (!ProvedNonStrictComparison)
-        ProvedNonStrictComparison = isImpliedCond(NonStrictPredicate, LHS, RHS,
-                                                  Condition, Inverse, Context);
-      if (!ProvedNonEquality)
-        ProvedNonEquality = isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS,
-                                          Condition, Inverse, Context);
-      if (ProvedNonStrictComparison && ProvedNonEquality)
+      auto ProofFn = [&](ICmpInst::Predicate P) {
+        return isImpliedCond(P, LHS, RHS, Condition, Inverse, Context);
+      };
+      if (SplitAndProve(ProofFn))
         return true;
     }
     return false;
@@ -10089,6 +10515,10 @@
          "LHS is not available at Loop Entry");
   assert(isAvailableAtLoopEntry(RHS, L) &&
          "RHS is not available at Loop Entry");
+
+  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
+    return true;
+
   return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
 }
 
@@ -10096,6 +10526,11 @@
                                     const SCEV *RHS,
                                     const Value *FoundCondValue, bool Inverse,
                                     const Instruction *Context) {
+  // A false condition implies anything. Do not bother analyzing it further.
+  if (FoundCondValue ==
+      ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
+    return true;
+
   if (!PendingLoopPredicates.insert(FoundCondValue).second)
     return false;
 
@@ -10103,20 +10538,15 @@
       make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
 
   // Recursively handle And and Or conditions.
-  if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
-    if (BO->getOpcode() == Instruction::And) {
-      if (!Inverse)
-        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
-                             Context) ||
-               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
-                             Context);
-    } else if (BO->getOpcode() == Instruction::Or) {
-      if (Inverse)
-        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse,
-                             Context) ||
-               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse,
-                             Context);
-    }
+  const Value *Op0, *Op1;
+  if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
+    if (!Inverse)
+      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
+              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
+  } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
+    if (Inverse)
+      return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
+              isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
   }
 
   const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
@@ -10147,7 +10577,7 @@
     // For unsigned and equality predicates, try to prove that both found
     // operands fit into narrow unsigned range. If so, try to prove facts in
     // narrow types.
-    if (!CmpInst::isSigned(FoundPred)) {
+    if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy()) {
       auto *NarrowType = LHS->getType();
       auto *WideType = FoundLHS->getType();
       auto BitWidth = getTypeSizeInBits(NarrowType);
@@ -10163,6 +10593,8 @@
       }
     }
 
+    if (LHS->getType()->isPointerTy())
+      return false;
     if (CmpInst::isSigned(Pred)) {
       LHS = getSignExtendExpr(LHS, FoundLHS->getType());
       RHS = getSignExtendExpr(RHS, FoundLHS->getType());
@@ -10172,6 +10604,8 @@
     }
   } else if (getTypeSizeInBits(LHS->getType()) >
       getTypeSizeInBits(FoundLHS->getType())) {
+    if (FoundLHS->getType()->isPointerTy())
+      return false;
     if (CmpInst::isSigned(FoundPred)) {
       FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
       FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
@@ -10218,11 +10652,30 @@
   // Check whether swapping the found predicate makes it the same as the
   // desired predicate.
   if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
-    if (isa<SCEVConstant>(RHS))
+    // We can write the implication
+    // 0.  LHS Pred      RHS  <-   FoundLHS SwapPred  FoundRHS
+    // using one of the following ways:
+    // 1.  LHS Pred      RHS  <-   FoundRHS Pred      FoundLHS
+    // 2.  RHS SwapPred  LHS  <-   FoundLHS SwapPred  FoundRHS
+    // 3.  LHS Pred      RHS  <-  ~FoundLHS Pred     ~FoundRHS
+    // 4. ~LHS SwapPred ~RHS  <-   FoundLHS SwapPred  FoundRHS
+    // Forms 1. and 2. require swapping the operands of one condition. Don't
+    // do this if it would break canonical constant/addrec ordering.
+    if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
+      return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
+                                   Context);
+    if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
       return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context);
-    else
-      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred), RHS,
-                                   LHS, FoundLHS, FoundRHS, Context);
+
+    // Don't try to getNotSCEV pointers.
+    if (LHS->getType()->isPointerTy() || FoundLHS->getType()->isPointerTy())
+      return false;
+
+    // There's no clear preference between forms 3. and 4.; try both.
+    return isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
+                                 FoundLHS, FoundRHS, Context) ||
+           isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
+                                 getNotSCEV(FoundRHS), Context);
   }
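
Forms 3. and 4. lean on bitwise negation reversing both orders: x >u y iff ~x <u ~y, and likewise for signed order since ~x == -x - 1. An exhaustive 8-bit check of both identities:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned A = 0; A < 256; ++A)
        for (unsigned B = 0; B < 256; ++B) {
          uint8_t UA = (uint8_t)A, UB = (uint8_t)B;
          assert((UA > UB) == ((uint8_t)~UA < (uint8_t)~UB));  // unsigned order flips
          int8_t SA = (int8_t)UA, SB = (int8_t)UB;
          assert((SA > SB) == ((int8_t)~SA < (int8_t)~SB));    // ~x == -x - 1
        }
    }
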
 
   // Unsigned comparison is the same as signed comparison when both the operands
@@ -10649,11 +11102,7 @@
     return true;
 
   return isImpliedCondOperandsHelper(Pred, LHS, RHS,
-                                     FoundLHS, FoundRHS) ||
-         // ~x < ~y --> x > y
-         isImpliedCondOperandsHelper(Pred, LHS, RHS,
-                                     getNotSCEV(FoundRHS),
-                                     getNotSCEV(FoundLHS));
+                                     FoundLHS, FoundRHS);
 }
 
 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
@@ -11006,7 +11455,7 @@
   // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
   // antecedent "`FoundLHS` `Pred` `FoundRHS`".
   ConstantRange FoundLHSRange =
-      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);
+      ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS);
 
   // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
   ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
@@ -11014,20 +11463,15 @@
   // We can also compute the range of values for `LHS` that satisfy the
   // consequent, "`LHS` `Pred` `RHS`":
   const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
-  ConstantRange SatisfyingLHSRange =
-      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
-
   // The antecedent implies the consequent if every value of `LHS` that
   // satisfies the antecedent also satisfies the consequent.
-  return SatisfyingLHSRange.contains(LHSRange);
+  return LHSRange.icmp(Pred, ConstRHS);
 }
 
-bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
-                                         bool IsSigned, bool NoWrap) {
+bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
+                                        bool IsSigned) {
   assert(isKnownPositive(Stride) && "Positive stride expected!");
 
-  if (NoWrap) return false;
-
   unsigned BitWidth = getTypeSizeInBits(RHS->getType());
   const SCEV *One = getOne(Stride->getType());
 
@@ -11048,10 +11492,9 @@
   return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
 }
 
-bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
-                                         bool IsSigned, bool NoWrap) {
-  if (NoWrap) return false;
-
+bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
+                                        bool IsSigned) {
+
   unsigned BitWidth = getTypeSizeInBits(RHS->getType());
   const SCEV *One = getOne(Stride->getType());
 
@@ -11072,12 +11515,13 @@
   return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
 }
 
-const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
-                                            bool Equality) {
-  const SCEV *One = getOne(Step->getType());
-  Delta = Equality ? getAddExpr(Delta, Step)
-                   : getAddExpr(Delta, getMinusSCEV(Step, One));
-  return getUDivExpr(Delta, Step);
+const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) {
+  // umin(N, 1) + floor((N - umin(N, 1)) / D)
+  // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
+  // expression fixes the case of N=0.
+  const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
+  const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
+  return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
 }
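
The familiar ceiling-division formula (N + D - 1) /u D can overflow in the addition; the umin form above never adds anything to N. The same identity on plain machine integers, verified exhaustively at 8 bits (a sketch of the trick, not the SCEV path):

    #include <cassert>
    #include <cstdint>

    // ceil(N / D) without the overflow-prone "N + D - 1".
    static uint8_t udivCeil(uint8_t N, uint8_t D) {
      uint8_t MinN1 = N < 1 ? N : 1;          // umin(N, 1): zero iff N == 0
      return (uint8_t)(MinN1 + (N - MinN1) / D);
    }

    int main() {
      for (unsigned N = 0; N < 256; ++N)
        for (unsigned D = 1; D < 256; ++D)
          assert(udivCeil((uint8_t)N, (uint8_t)D) ==
                 (N + D - 1) / D);            // reference, in a wider type
    }
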
 
 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
@@ -11085,25 +11529,24 @@
                                                     const SCEV *End,
                                                     unsigned BitWidth,
                                                     bool IsSigned) {
+  // The logic in this function assumes we can represent a positive stride.
+  // If we can't, the backedge-taken count must be zero.
+  if (IsSigned && BitWidth == 1)
+    return getZero(Stride->getType());
 
-  assert(!isKnownNonPositive(Stride) &&
-         "Stride is expected strictly positive!");
   // Calculate the maximum backedge count based on the range of values
   // permitted by Start, End, and Stride.
-  const SCEV *MaxBECount;
   APInt MinStart =
       IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
 
-  APInt StrideForMaxBECount =
+  APInt MinStride =
       IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
 
-  // We already know that the stride is positive, so we paper over conservatism
-  // in our range computation by forcing StrideForMaxBECount to be at least one.
-  // In theory this is unnecessary, but we expect MaxBECount to be a
-  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there
-  // is nothing to constant fold it to).
-  APInt One(BitWidth, 1, IsSigned);
-  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);
+  // We assume either the stride is positive, or the backedge-taken count
+  // is zero. So force StrideForMaxBECount to be at least one.
+  APInt One(BitWidth, 1);
+  APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
+                                       : APIntOps::umax(One, MinStride);
 
   APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                             : APInt::getMaxValue(BitWidth);
@@ -11116,11 +11559,12 @@
   APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                           : APIntOps::umin(getUnsignedRangeMax(End), Limit);
 
-  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
-                              getConstant(StrideForMaxBECount) /* Step */,
-                              false /* Equality */);
+  // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
+  MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
+                    : APIntOps::umax(MaxEnd, MinStart);
 
-  return MaxBECount;
+  return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
+                         getConstant(StrideForMaxBECount) /* Step */);
 }
 
 ScalarEvolution::ExitLimit
@@ -11144,8 +11588,19 @@
   if (!IV || IV->getLoop() != L || !IV->isAffine())
     return getCouldNotCompute();
 
-  bool NoWrap = ControlsExit &&
-                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
+  // A precondition of this method is that the condition being analyzed
+  // reaches an exiting branch which dominates the latch.  Given that, we can
+  // assume that an increment which violates the nowrap specification and
+  // produces poison must cause undefined behavior when the resulting poison
+  // value is branched upon and thus we can conclude that the backedge is
+  // taken no more often than would be required to produce that poison value.
+  // Note that a well defined loop can exit on the iteration which violates
+  // the nowrap specification if there is another exit (either explicit or
+  // implicit/exceptional) which causes the loop to execute before the
+  // exiting instruction we're analyzing would trigger UB.
+  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
+  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
+  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
 
   const SCEV *Stride = IV->getStepRecurrence(*this);
 
@@ -11177,8 +11632,8 @@
     // Precondition a) implies that if the stride is negative, this is a single
     // trip loop. The backedge taken count formula reduces to zero in this case.
     //
-    // Precondition b) implies that the unknown stride cannot be zero otherwise
-    // we have UB.
+    // Precondition b) implies that if RHS is invariant in L, then unknown
+    // stride being zero means the backedge can't be taken without UB.
     //
     // The positive stride case is the same as isKnownPositive(Stride) returning
     // true (original behavior of the function).
@@ -11196,20 +11651,109 @@
     //   A[i] = i;
     //
     if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
-        !loopHasNoSideEffects(L))
+        !loopIsFiniteByAssumption(L))
       return getCouldNotCompute();
-  } else if (!Stride->isOne() &&
-             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
+
+    if (!isKnownNonZero(Stride)) {
+      // If we have a step of zero, and RHS isn't invariant in L, we don't know
+      // if it might eventually be greater than start and if so, on which
+      // iteration.  We can't even produce a useful upper bound.
+      if (!isLoopInvariant(RHS, L))
+        return getCouldNotCompute();
+
+      // We allow a potentially zero stride, but we need to divide by stride
+      // below.  Since the loop can't be infinite and this check must control
+      // the sole exit, we can infer the exit must be taken on the first
+      // iteration (e.g. backedge count = 0) if the stride is zero.  Given that,
+      // we know the numerator in the divides below must be zero, so we can
+      // pick an arbitrary non-zero value for the denominator (e.g. stride)
+      // and produce the right result.
+      // FIXME: Handle the case where Stride is poison?
+      auto wouldZeroStrideBeUB = [&]() {
+        // Proof by contradiction.  Suppose the stride were zero.  If we can
+        // prove that the backedge *is* taken on the first iteration, then since
+        // we know this condition controls the sole exit, we must have an
+        // infinite loop.  We can't have a (well defined) infinite loop per
+        // check just above.
+        // Note: The (Start - Stride) term is used to get the start' term from
+        // {start'+stride,+,stride}. Remember that we only care about the
+        // result of this expression when stride == 0 at runtime.
+        auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
+        return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
+      };
+      if (!wouldZeroStrideBeUB()) {
+        Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
+      }
+    }
+  } else if (!Stride->isOne() && !NoWrap) {
+    auto isUBOnWrap = [&]() {
+      // Can we prove this loop *must* be UB if overflow of IV occurs?
+      // Reasoning goes as follows:
+      // * Suppose the IV did self wrap.
+      // * If Stride evenly divides the iteration space, then once wrap
+      //   occurs, the loop must revisit the same values.
+      // * We know that RHS is invariant, and that none of those values
+      //   caused this exit to be taken previously.  Thus, this exit is
+      //   dynamically dead.
+      // * If this is the sole exit, then a dead exit implies the loop
+      //   must be infinite if there are no abnormal exits.
+      // * If the loop were infinite, then it must either not be mustprogress
+      //   or have side effects. Otherwise, it must be UB.
+      // * It can't, by assumption, be UB, so we have contradicted our
+      //   premise and can conclude the IV did not in fact self-wrap.
+      // From no-self-wrap, we then need to prove no-(un)signed-wrap.  This
+      // follows from the fact that every (un)signed-wrapped, but not
+      // self-wrapped, value must be less than the last value before the
+      // (un)signed wrap.  Since we know that last value didn't cause an
+      // exit, neither will any smaller one.
+
+      if (!isLoopInvariant(RHS, L))
+        return false;
+
+      auto *StrideC = dyn_cast<SCEVConstant>(Stride);
+      if (!StrideC || !StrideC->getAPInt().isPowerOf2())
+        return false;
+
+      if (!ControlsExit || !loopHasNoAbnormalExits(L))
+        return false;
+
+      return loopIsFiniteByAssumption(L);
+    };
+
     // Avoid proven overflow cases: this will ensure that the backedge taken
     // count will not generate any unsigned overflow. Relaxed no-overflow
     // conditions exploit NoWrapFlags, allowing to optimize in presence of
     // undefined behaviors like the case of C language.
-    return getCouldNotCompute();
+    if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
+      return getCouldNotCompute();
+  }
 
-  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
-                                      : ICmpInst::ICMP_ULT;
+  // On all paths just preceding, we established the following invariant:
+  //   IV can be assumed not to overflow up to and including the exiting
+  //   iteration.  We proved this in one of two ways:
+  //   1) We can show overflow doesn't occur before the exiting iteration
+  //      (1a: via canIVOverflowOnLT, or 1b: a step of one)
+  //   2) We can show that if overflow occurs, the loop must execute UB
+  //      before any possible exit.
+  // Note that we have not yet proved RHS invariant (in general).
+
   const SCEV *Start = IV->getStart();
-  const SCEV *End = RHS;
+
+  // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
+  // Use integer-typed versions for actual computation.
+  const SCEV *OrigStart = Start;
+  const SCEV *OrigRHS = RHS;
+  if (Start->getType()->isPointerTy()) {
+    Start = getLosslessPtrToIntExpr(Start);
+    if (isa<SCEVCouldNotCompute>(Start))
+      return Start;
+  }
+  if (RHS->getType()->isPointerTy()) {
+    RHS = getLosslessPtrToIntExpr(RHS);
+    if (isa<SCEVCouldNotCompute>(RHS))
+      return RHS;
+  }
+
   // When the RHS is not invariant, we do not know the end bound of the loop and
   // cannot calculate the ExactBECount needed by ExitLimit. However, we can
   // calculate the MaxBECount, given the start, stride and max value for the end
@@ -11221,38 +11765,172 @@
     return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                      false /*MaxOrZero*/, Predicates);
   }
-  // If the backedge is taken at least once, then it will be taken
-  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
-  // is the LHS value of the less-than comparison the first time it is evaluated
-  // and End is the RHS.
-  const SCEV *BECountIfBackedgeTaken =
-    computeBECount(getMinusSCEV(End, Start), Stride, false);
-  // If the loop entry is guarded by the result of the backedge test of the
-  // first loop iteration, then we know the backedge will be taken at least
-  // once and so the backedge taken count is as above. If not then we use the
-  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
-  // as if the backedge is taken at least once max(End,Start) is End and so the
-  // result is as above, and if not max(End,Start) is Start so we get a backedge
-  // count of zero.
-  const SCEV *BECount;
-  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
-    BECount = BECountIfBackedgeTaken;
-  else {
+
+  // We use the expression (max(End,Start)-Start)/Stride to describe the
+  // backedge count, as if the backedge is taken at least once max(End,Start)
+  // is End and so the result is as above, and if not max(End,Start) is Start
+  // so we get a backedge count of zero.
+  const SCEV *BECount = nullptr;
+  auto *StartMinusStride = getMinusSCEV(OrigStart, Stride);
+  // Can we prove max(RHS,Start) > Start - Stride?
+  if (isLoopEntryGuardedByCond(L, Cond, StartMinusStride, Start) &&
+      isLoopEntryGuardedByCond(L, Cond, StartMinusStride, RHS)) {
+    // In this case, we can use a refined formula for computing backedge taken
+    // count.  The general formula remains:
+    //   "End-Start /uceiling Stride" where "End = max(RHS,Start)"
+    // We want to use the alternate formula:
+    //   "((End - 1) - (Start - Stride)) /u Stride"
+    // Let's do a quick case analysis to show these are equivalent under
+    // our precondition that max(RHS,Start) > Start - Stride.
+    // * For RHS <= Start, the backedge-taken count must be zero.
+    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
+    //   "((Start - 1) - (Start - Stride)) /u Stride" which simplies to
+    //   "Stride - 1 /u Stride" which is indeed zero for all non-zero values
+    //     of Stride.  For 0 stride, we've use umin(1,Stride) above, reducing
+    //     this to the stride of 1 case.
+    // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride".
+    //   "((End - 1) - (Start - Stride)) /u Stride" reduces to
+    //   "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to
+    //   "((RHS - (Start - Stride) - 1) /u Stride".
+    //   Our preconditions trivially imply no overflow in that form.
+    const SCEV *MinusOne = getMinusOne(Stride->getType());
+    const SCEV *Numerator =
+        getMinusSCEV(getAddExpr(RHS, MinusOne), StartMinusStride);
+    if (!isa<SCEVCouldNotCompute>(Numerator)) {
+      BECount = getUDivExpr(Numerator, Stride);
+    }
+  }
+
+  const SCEV *BECountIfBackedgeTaken = nullptr;
+  if (!BECount) {
+    auto canProveRHSGreaterThanEqualStart = [&]() {
+      auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
+      if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart))
+        return true;
+
+      // (RHS > Start - 1) implies RHS >= Start.
+      // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
+      //   "Start - 1" doesn't overflow.
+      // * For signed comparison, if Start - 1 does overflow, it's equal
+      //   to INT_MAX, and "RHS >s INT_MAX" is trivially false.
+      // * For unsigned comparison, if Start - 1 does overflow, it's equal
+      //   to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
+      //
+      // FIXME: Should isLoopEntryGuardedByCond do this for us?
+      auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+      auto *StartMinusOne = getAddExpr(OrigStart,
+                                       getMinusOne(OrigStart->getType()));
+      return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
+    };
+
     // If we know that RHS >= Start in the context of loop, then we know that
     // max(RHS, Start) = RHS at this point.
-    if (isLoopEntryGuardedByCond(
-            L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, RHS, Start))
+    const SCEV *End;
+    if (canProveRHSGreaterThanEqualStart()) {
       End = RHS;
-    else
+    } else {
+      // If RHS < Start, the backedge will be taken zero times.  So in
+      // general, we can write the backedge-taken count as:
+      //
+      //     RHS >= Start ? ceil((RHS - Start) / Stride) : 0
+      //
+      // We convert it to the following to make it more convenient for SCEV:
+      //
+      //     ceil((max(RHS, Start) - Start) / Stride)
       End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
-    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
+
+      // See what would happen if we assume the backedge is taken. This is
+      // used to compute MaxBECount.
+      BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
+    }
+
+    // At this point, we know:
+    //
+    // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
+    // 2. The index variable doesn't overflow.
+    //
+    // Therefore, we know N exists such that
+    // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
+    // doesn't overflow.
+    //
+    // Using this information, try to prove whether the addition in
+    // "(Start - End) + (Stride - 1)" has unsigned overflow.
+    const SCEV *One = getOne(Stride->getType());
+    bool MayAddOverflow = [&] {
+      if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
+        if (StrideC->getAPInt().isPowerOf2()) {
+          // Suppose Stride is a power of two, and Start/End are unsigned
+          // integers.  Let UMAX be the largest representable unsigned
+          // integer.
+          //
+          // By the preconditions of this function, we know
+          // "(Start + Stride * N) >= End", and this doesn't overflow.
+          // As a formula:
+          //
+          //   End <= (Start + Stride * N) <= UMAX
+          //
+          // Subtracting Start from all the terms:
+          //
+          //   End - Start <= Stride * N <= UMAX - Start
+          //
+          // Since Start is unsigned, UMAX - Start <= UMAX.  Therefore:
+          //
+          //   End - Start <= Stride * N <= UMAX
+          //
+          // Stride * N is a multiple of Stride. Therefore,
+          //
+          //   End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
+          //
+          // Since Stride is a power of two, UMAX + 1 is divisible by Stride.
+          // Therefore, UMAX mod Stride == Stride - 1.  So we can write:
+          //
+          //   End - Start <= Stride * N <= UMAX - Stride + 1
+          //
+          // Dropping the middle term:
+          //
+          //   End - Start <= UMAX - Stride + 1
+          //
+          // Adding Stride - 1 to both sides:
+          //
+          //   (End - Start) + (Stride - 1) <= UMAX
+          //
+          // In other words, the addition doesn't have unsigned overflow.
+          //
+          // A similar proof works if we treat Start/End as signed values.
+          // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to
+          // use signed max instead of unsigned max. Note that we're trying
+          // to prove a lack of unsigned overflow in either case.
+          return false;
+        }
+      }
+      if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
+        // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1.
+        // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End.
+        // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End.
+        //
+        // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End.
+        return false;
+      }
+      return true;
+    }();
+
+    const SCEV *Delta = getMinusSCEV(End, Start);
+    if (!MayAddOverflow) {
+      // floor((D + (S - 1)) / S)
+      // We prefer this formulation if it's legal because it's fewer operations.
+      BECount =
+          getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
+    } else {
+      BECount = getUDivCeilSCEV(Delta, Stride);
+    }
   }
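
The power-of-two branch of MayAddOverflow can be cross-checked by brute force at 8 bits: whenever some non-wrapping Start + Stride*N reaches End, the addition (End - Start) + (Stride - 1) stays at or below UMAX. A verification sketch under those assumptions:

    #include <cassert>

    int main() {
      const unsigned UMAX = 255;                               // 8-bit playground
      for (unsigned Stride = 1; Stride <= 128; Stride <<= 1)   // powers of two
        for (unsigned Start = 0; Start <= UMAX; ++Start)
          for (unsigned End = Start; End <= UMAX; ++End) {
            // Largest Start + Stride*N computable without unsigned wrap.
            unsigned Last = Start + Stride * ((UMAX - Start) / Stride);
            if (Last >= End)                  // some iterate reaches End
              assert((End - Start) + (Stride - 1) <= UMAX);
          }
    }
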
 
   const SCEV *MaxBECount;
   bool MaxOrZero = false;
-  if (isa<SCEVConstant>(BECount))
+  if (isa<SCEVConstant>(BECount)) {
     MaxBECount = BECount;
-  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
+  } else if (BECountIfBackedgeTaken &&
+             isa<SCEVConstant>(BECountIfBackedgeTaken)) {
     // If we know exactly how many times the backedge will be taken if it's
     // taken at least once, then the backedge count will either be that or
     // zero.
@@ -11290,8 +11968,9 @@
   if (!IV || IV->getLoop() != L || !IV->isAffine())
     return getCouldNotCompute();
 
-  bool NoWrap = ControlsExit &&
-                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);
+  auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
+  bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
+  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
 
   const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
 
@@ -11303,11 +11982,9 @@
   // will not generate any unsigned overflow. Relaxed no-overflow conditions
   // exploit NoWrapFlags, allowing to optimize in presence of undefined
   // behaviors like the case of C language.
-  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
-    return getCouldNotCompute();
-
-  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
-                                      : ICmpInst::ICMP_UGT;
+  if (!Stride->isOne() && !NoWrap)
+    if (canIVOverflowOnGT(RHS, Stride, IsSigned))
+      return getCouldNotCompute();
 
   const SCEV *Start = IV->getStart();
   const SCEV *End = RHS;
@@ -11321,7 +11998,23 @@
       End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
   }
 
-  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);
+  if (Start->getType()->isPointerTy()) {
+    Start = getLosslessPtrToIntExpr(Start);
+    if (isa<SCEVCouldNotCompute>(Start))
+      return Start;
+  }
+  if (End->getType()->isPointerTy()) {
+    End = getLosslessPtrToIntExpr(End);
+    if (isa<SCEVCouldNotCompute>(End))
+      return End;
+  }
+
+  // Compute ((Start - End) + (Stride - 1)) / Stride.
+  // FIXME: This can overflow. Holding off on fixing this for now;
+  // howManyGreaterThans will hopefully be gone soon.
+  const SCEV *One = getOne(Stride->getType());
+  const SCEV *BECount = getUDivExpr(
+      getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);
 
   APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                             : getUnsignedRangeMax(Start);
@@ -11342,8 +12035,8 @@
 
   const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                                ? BECount
-                               : computeBECount(getConstant(MaxStart - MinEnd),
-                                                getConstant(MinStride), false);
+                               : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
+                                                 getConstant(MinStride));
 
   if (isa<SCEVCouldNotCompute>(MaxBECount))
     MaxBECount = BECount;
@@ -11912,20 +12605,12 @@
   assert(Subscripts.empty() && Sizes.empty() &&
          "Expected output lists to be empty on entry to this function.");
   assert(GEP && "getIndexExpressionsFromGEP called with a null GEP");
-  Type *Ty = GEP->getPointerOperandType();
+  Type *Ty = nullptr;
   bool DroppedFirstDim = false;
   for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
     const SCEV *Expr = getSCEV(GEP->getOperand(i));
     if (i == 1) {
-      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
-        Ty = PtrTy->getElementType();
-      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
-        Ty = ArrayTy->getElementType();
-      } else {
-        Subscripts.clear();
-        Sizes.clear();
-        return false;
-      }
+      Ty = GEP->getSourceElementType();
       if (auto *Const = dyn_cast<SCEVConstant>(Expr))
         if (Const->getValue()->isZero()) {
           DroppedFirstDim = true;
@@ -12061,13 +12746,8 @@
   ExprValueMap.clear();
   ValueExprMap.clear();
   HasRecMap.clear();
-
-  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
-  // that a loop had multiple computable exits.
-  for (auto &BTCI : BackedgeTakenCounts)
-    BTCI.second.clear();
-  for (auto &BTCI : PredicatedBackedgeTakenCounts)
-    BTCI.second.clear();
+  BackedgeTakenCounts.clear();
+  PredicatedBackedgeTakenCounts.clear();
 
   assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
   assert(PendingPhiRanges.empty() && "getRangeRef garbage");
@@ -12450,14 +13130,6 @@
   return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
 }
 
-bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
-  auto IsS = [&](const SCEV *X) { return S == X; };
-  auto ContainsS = [&](const SCEV *X) {
-    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
-  };
-  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
-}
-
 void
 ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
   ValuesAtScopes.erase(S);
@@ -12479,13 +13151,12 @@
   }
 
   auto RemoveSCEVFromBackedgeMap =
-      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
+      [S](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
         for (auto I = Map.begin(), E = Map.end(); I != E;) {
           BackedgeTakenInfo &BEInfo = I->second;
-          if (BEInfo.hasOperand(S, this)) {
-            BEInfo.clear();
+          if (BEInfo.hasOperand(S))
             Map.erase(I++);
-          } else
+          else
             ++I;
         }
       };
@@ -13155,6 +13826,11 @@
   if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
     if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
       LHS = Trunc->getOperand();
+      // For now, bail out if the type of LHS is larger than the type of
+      // the expression.
+      if (getTypeSizeInBits(LHS->getType()) >
+          getTypeSizeInBits(Expr->getType()))
+        return false;
       if (LHS->getType() != Expr->getType())
         LHS = getZeroExtendExpr(LHS, Expr->getType());
       RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
@@ -13245,54 +13921,118 @@
 const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
   auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                               const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
-    if (!isa<SCEVUnknown>(LHS)) {
+    // If we have LHS == 0, check if LHS is computing a property of some unknown
+    // SCEV %v; if so, rewrite %v to express that property explicitly.
+    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
+    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
+        RHSC->getValue()->isNullValue()) {
+      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
+      // explicitly express that.
+      const SCEV *URemLHS = nullptr;
+      const SCEV *URemRHS = nullptr;
+      if (matchURem(LHS, URemLHS, URemRHS)) {
+        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
+          Value *V = LHSUnknown->getValue();
+          auto Multiple =
+              getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS,
+                         (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
+          RewriteMap[V] = Multiple;
+          return;
+        }
+      }
+    }
+
+    if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
       std::swap(LHS, RHS);
       Predicate = CmpInst::getSwappedPredicate(Predicate);
     }
 
-    // For now, limit to conditions that provide information about unknown
-    // expressions.
-    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
-    if (!LHSUnknown)
+    // Check for a condition of the form (-C1 + X < C2).  InstCombine will
+    // create this form when combining two checks of the form (X u< C2 + C1) and
+    // (X >=u C1).
+    auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap]() {
+      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
+      if (!AddExpr || AddExpr->getNumOperands() != 2)
+        return false;
+
+      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
+      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
+      auto *C2 = dyn_cast<SCEVConstant>(RHS);
+      if (!C1 || !C2 || !LHSUnknown)
+        return false;
+
+      auto ExactRegion =
+          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
+              .sub(C1->getAPInt());
+
+      // Bail out unless we have a non-wrapping, monotonic range.
+      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
+        return false;
+      auto I = RewriteMap.find(LHSUnknown->getValue());
+      const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
+      RewriteMap[LHSUnknown->getValue()] = getUMaxExpr(
+          getConstant(ExactRegion.getUnsignedMin()),
+          getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
+      return true;
+    };
+    if (MatchRangeCheckIdiom())
       return;
 
-    // TODO: use information from more predicates.
-    switch (Predicate) {
-    case CmpInst::ICMP_ULT: {
-      if (!containsAddRecurrence(RHS)) {
-        const SCEV *Base = LHS;
-        auto I = RewriteMap.find(LHSUnknown->getValue());
-        if (I != RewriteMap.end())
-          Base = I->second;
+    // For now, limit to conditions that provide information about unknown
+    // expressions. RHS also cannot contain add recurrences.
+    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
+    if (!LHSUnknown || containsAddRecurrence(RHS))
+      return;
 
-        RewriteMap[LHSUnknown->getValue()] =
-            getUMinExpr(Base, getMinusSCEV(RHS, getOne(RHS->getType())));
-      }
+    // Check whether LHS has already been rewritten. In that case we want to
+    // chain further rewrites onto the already rewritten value.
+    auto I = RewriteMap.find(LHSUnknown->getValue());
+    const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
+    const SCEV *RewrittenRHS = nullptr;
+    switch (Predicate) {
+    case CmpInst::ICMP_ULT:
+      RewrittenRHS =
+          getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
       break;
-    }
-    case CmpInst::ICMP_ULE: {
-      if (!containsAddRecurrence(RHS)) {
-        const SCEV *Base = LHS;
-        auto I = RewriteMap.find(LHSUnknown->getValue());
-        if (I != RewriteMap.end())
-          Base = I->second;
-        RewriteMap[LHSUnknown->getValue()] = getUMinExpr(Base, RHS);
-      }
+    case CmpInst::ICMP_SLT:
+      RewrittenRHS =
+          getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
       break;
-    }
+    case CmpInst::ICMP_ULE:
+      RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
+      break;
+    case CmpInst::ICMP_SLE:
+      RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
+      break;
+    case CmpInst::ICMP_UGT:
+      RewrittenRHS =
+          getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
+      break;
+    case CmpInst::ICMP_SGT:
+      RewrittenRHS =
+          getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
+      break;
+    case CmpInst::ICMP_UGE:
+      RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
+      break;
+    case CmpInst::ICMP_SGE:
+      RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
+      break;
     case CmpInst::ICMP_EQ:
       if (isa<SCEVConstant>(RHS))
-        RewriteMap[LHSUnknown->getValue()] = RHS;
+        RewrittenRHS = RHS;
       break;
     case CmpInst::ICMP_NE:
       if (isa<SCEVConstant>(RHS) &&
           cast<SCEVConstant>(RHS)->getValue()->isNullValue())
-        RewriteMap[LHSUnknown->getValue()] =
-            getUMaxExpr(LHS, getOne(RHS->getType()));
+        RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
       break;
     default:
       break;
     }
+
+    if (RewrittenRHS)
+      RewriteMap[LHSUnknown->getValue()] = RewrittenRHS;
   };
   // Starting at the loop predecessor, climb up the predecessor chain, as long
   // as there are predecessors that can be found that have unique successors
@@ -13308,16 +14048,30 @@
     if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
       continue;
 
-    // TODO: use information from more complex conditions, e.g. AND expressions.
-    auto *Cmp = dyn_cast<ICmpInst>(LoopEntryPredicate->getCondition());
-    if (!Cmp)
-      continue;
+    bool EnterIfTrue = LoopEntryPredicate->getSuccessor(0) == Pair.second;
+    SmallVector<Value *, 8> Worklist;
+    SmallPtrSet<Value *, 8> Visited;
+    Worklist.push_back(LoopEntryPredicate->getCondition());
+    while (!Worklist.empty()) {
+      Value *Cond = Worklist.pop_back_val();
+      if (!Visited.insert(Cond).second)
+        continue;
 
-    auto Predicate = Cmp->getPredicate();
-    if (LoopEntryPredicate->getSuccessor(1) == Pair.second)
-      Predicate = CmpInst::getInversePredicate(Predicate);
-    CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
-                     getSCEV(Cmp->getOperand(1)), RewriteMap);
+      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
+        auto Predicate =
+            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
+        CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
+                         getSCEV(Cmp->getOperand(1)), RewriteMap);
+        continue;
+      }
+
+      Value *L, *R;
+      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
+                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
+        Worklist.push_back(L);
+        Worklist.push_back(R);
+      }
+    }
   }
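
The worklist above decomposes the branch condition through and/or (including the select-based m_LogicalAnd/m_LogicalOr forms) and hands each ICmp leaf to CollectCondition with the correct polarity. The same traversal shape over a toy condition tree; Cond and collectLeaves are assumptions for illustration:

    #include <unordered_set>
    #include <vector>

    struct Cond {
      enum Kind { ICmp, And, Or } K;
      const Cond *L = nullptr, *R = nullptr;
    };

    // Visit every comparison reachable through And when the branch enters the
    // loop on true (EnterIfTrue), or through Or when it enters on false.
    template <typename Fn>
    static void collectLeaves(const Cond *Root, bool EnterIfTrue, Fn Collect) {
      std::vector<const Cond *> Worklist{Root};
      std::unordered_set<const Cond *> Visited;
      while (!Worklist.empty()) {
        const Cond *C = Worklist.back();
        Worklist.pop_back();
        if (!Visited.insert(C).second)
          continue;
        if (C->K == Cond::ICmp) {
          Collect(C, /*Negated=*/!EnterIfTrue);
          continue;
        }
        if ((EnterIfTrue && C->K == Cond::And) ||
            (!EnterIfTrue && C->K == Cond::Or)) {
          Worklist.push_back(C->L);
          Worklist.push_back(C->R);
        }
      }
    }
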
 
   // Also collect information from assumptions dominating the loop.
diff --git a/src/llvm-project/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 8f289fe..2262fc9 100644
--- a/src/llvm-project/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -19,6 +19,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
 #include "llvm/InitializePasses.h"
 using namespace llvm;
 
@@ -28,7 +29,7 @@
   // pointer values are. This allows the code below to ignore this special
   // case.
   if (LocA.Size.isZero() || LocB.Size.isZero())
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   // This is SCEVAAResult. Get the SCEVs!
   const SCEV *AS = SE.getSCEV(const_cast<Value *>(LocA.Ptr));
@@ -36,7 +37,7 @@
 
   // If they evaluate to the same expression, it's a MustAlias.
   if (AS == BS)
-    return MustAlias;
+    return AliasResult::MustAlias;
 
   // If something is known about the difference between the two addresses,
   // see if it's enough to prove a NoAlias.
@@ -56,9 +57,10 @@
     // Test whether the difference is known to be great enough that memory of
    // the given sizes doesn't overlap. This assumes that ASizeInt and BSizeInt
     // are non-zero, which is special-cased above.
-    if (ASizeInt.ule(SE.getUnsignedRange(BA).getUnsignedMin()) &&
+    if (!isa<SCEVCouldNotCompute>(BA) &&
+        ASizeInt.ule(SE.getUnsignedRange(BA).getUnsignedMin()) &&
         (-BSizeInt).uge(SE.getUnsignedRange(BA).getUnsignedMax()))
-      return NoAlias;
+      return AliasResult::NoAlias;
 
     // Folding the subtraction while preserving range information can be tricky
     // (because of INT_MIN, etc.); if the prior test failed, swap AS and BS
@@ -70,9 +72,10 @@
     // Test whether the difference is known to be great enough that memory of
    // the given sizes doesn't overlap. This assumes that ASizeInt and BSizeInt
     // are non-zero, which is special-cased above.
-    if (BSizeInt.ule(SE.getUnsignedRange(AB).getUnsignedMin()) &&
+    if (!isa<SCEVCouldNotCompute>(AB) &&
+        BSizeInt.ule(SE.getUnsignedRange(AB).getUnsignedMin()) &&
         (-ASizeInt).uge(SE.getUnsignedRange(AB).getUnsignedMax()))
-      return NoAlias;
+      return AliasResult::NoAlias;
   }
 
   // If ScalarEvolution can find an underlying object, form a new query.
@@ -89,8 +92,8 @@
                              BO ? LocationSize::beforeOrAfterPointer()
                                 : LocB.Size,
                              BO ? AAMDNodes() : LocB.AATags),
-              AAQI) == NoAlias)
-      return NoAlias;
+              AAQI) == AliasResult::NoAlias)
+      return AliasResult::NoAlias;
 
   // Forward the query to the next analysis.
   return AAResultBase::alias(LocA, LocB, AAQI);
@@ -117,6 +120,13 @@
   return nullptr;
 }
 
+bool SCEVAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
+                              FunctionAnalysisManager::Invalidator &Inv) {
+  // We don't care if this analysis itself is preserved; it has no state. But
+  // we need to check that the analyses it depends on have been.
+  return Inv.invalidate<ScalarEvolutionAnalysis>(Fn, PA);
+}
+
 AnalysisKey SCEVAA::Key;
 
 SCEVAAResult SCEVAA::run(Function &F, FunctionAnalysisManager &AM) {
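
The new `invalidate` hook encodes a common new-pass-manager contract: a result with no state of its own is still stale whenever an analysis it holds references into is invalidated, so it forwards the question instead of unconditionally returning false. A toy model of that forwarding (names hypothetical):

```cpp
#include <cassert>

// Stand-in for FunctionAnalysisManager::Invalidator.
struct Invalidator {
  bool SCEVInvalidated;
  bool invalidateSCEV() const { return SCEVInvalidated; }
};

// The result has no state of its own, so it is invalid exactly when the
// ScalarEvolution it captured a reference to is.
bool scevAAInvalidate(const Invalidator &Inv) { return Inv.invalidateSCEV(); }

int main() {
  assert(scevAAInvalidate({/*SCEVInvalidated=*/true}));
  assert(!scevAAInvalidate({/*SCEVInvalidated=*/false}));
}
```
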
diff --git a/src/llvm-project/llvm/lib/Analysis/ScopedNoAliasAA.cpp b/src/llvm-project/llvm/lib/Analysis/ScopedNoAliasAA.cpp
index 6b38d67..e847bf8 100644
--- a/src/llvm-project/llvm/lib/Analysis/ScopedNoAliasAA.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ScopedNoAliasAA.cpp
@@ -32,6 +32,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/ScopedNoAliasAA.h"
+#include "llvm/ADT/SetOperations.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/IR/InstrTypes.h"
@@ -63,10 +64,10 @@
   const MDNode *ANoAlias = LocA.AATags.NoAlias, *BNoAlias = LocB.AATags.NoAlias;
 
   if (!mayAliasInScopes(AScopes, BNoAlias))
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   if (!mayAliasInScopes(BScopes, ANoAlias))
-    return NoAlias;
+    return AliasResult::NoAlias;
 
   // If they may alias, chain to the next AliasAnalysis.
   return AAResultBase::alias(LocA, LocB, AAQI);
@@ -138,14 +139,7 @@
     collectMDInDomain(NoAlias, Domain, NANodes);
 
     // To not alias, all of the nodes in ScopeNodes must be in NANodes.
-    bool FoundAll = true;
-    for (const MDNode *SMD : ScopeNodes)
-      if (!NANodes.count(SMD)) {
-        FoundAll = false;
-        break;
-      }
-
-    if (FoundAll)
+    if (llvm::set_is_subset(ScopeNodes, NANodes))
       return false;
   }
 
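
`llvm::set_is_subset` (pulled in via `SetOperations.h` above) is equivalent to the manual flag-and-break loop it replaces. A generic re-implementation of the same shape:

```cpp
#include <cassert>
#include <set>
#include <string>

// True iff every element of A is also in B; llvm::set_is_subset is templated
// much like this over any set-like types exposing count().
template <class S1, class S2> bool setIsSubset(const S1 &A, const S2 &B) {
  for (const auto &E : A)
    if (!B.count(E))
      return false;
  return true;
}

int main() {
  std::set<std::string> ScopeNodes{"s0", "s1"};
  std::set<std::string> NANodes{"s0", "s1", "s2"};
  assert(setIsSubset(ScopeNodes, NANodes)); // all scopes covered => no alias
  NANodes.erase("s1");
  assert(!setIsSubset(ScopeNodes, NANodes));
}
```
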
diff --git a/src/llvm-project/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/StackSafetyAnalysis.cpp
index 73096eb..76f195f 100644
--- a/src/llvm-project/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -263,6 +263,8 @@
   const SCEV *AddrExp = SE.getTruncateOrZeroExtend(SE.getSCEV(Addr), PtrTy);
   const SCEV *BaseExp = SE.getTruncateOrZeroExtend(SE.getSCEV(Base), PtrTy);
   const SCEV *Diff = SE.getMinusSCEV(AddrExp, BaseExp);
+  if (isa<SCEVCouldNotCompute>(Diff))
+    return UnknownRange;
 
   ConstantRange Offset = SE.getSignedRange(Diff);
   if (isUnsafe(Offset))
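
The StackSafety fix above follows a recurring SCEV idiom: check for `SCEVCouldNotCompute` before asking range questions, and fall back to a conservative answer. Sketched with `std::optional` standing in for the sentinel:

```cpp
#include <cstdint>
#include <optional>

struct Range {
  int64_t Lo, Hi;
};
constexpr Range UnknownRange{INT64_MIN, INT64_MAX};

// Diff == nullopt models isa<SCEVCouldNotCompute>(Diff).
Range accessOffsetRange(std::optional<int64_t> Diff) {
  if (!Diff)
    return UnknownRange; // can't reason about the offset: assume anything
  return {*Diff, *Diff};
}

int main() {
  return accessOffsetRange(std::nullopt).Lo == INT64_MIN ? 0 : 1;
}
```
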
diff --git a/src/llvm-project/llvm/lib/Analysis/SyncDependenceAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/SyncDependenceAnalysis.cpp
index 67a1365..59582cd 100644
--- a/src/llvm-project/llvm/lib/Analysis/SyncDependenceAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/SyncDependenceAnalysis.cpp
@@ -420,13 +420,9 @@
 #ifndef NDEBUG
 static void printBlockSet(ConstBlockSet &Blocks, raw_ostream &Out) {
   Out << "[";
-  bool First = true;
-  for (const auto *BB : Blocks) {
-    if (!First)
-      Out << ", ";
-    First = false;
-    Out << BB->getName();
-  }
+  ListSeparator LS;
+  for (const auto *BB : Blocks)
+    Out << LS << BB->getName();
   Out << "]";
 }
 #endif
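
`llvm::ListSeparator` prints nothing the first time it is streamed and the separator every time after, which is what lets the hunk above drop the manual `First` flag. A minimal re-implementation of the idiom over `std::ostream`:

```cpp
#include <iostream>

class ListSeparator {
  bool First = true;
  const char *Sep;

public:
  explicit ListSeparator(const char *S = ", ") : Sep(S) {}
  friend std::ostream &operator<<(std::ostream &OS, ListSeparator &LS) {
    if (LS.First)
      LS.First = false; // swallow the separator once
    else
      OS << LS.Sep;
    return OS;
  }
};

int main() {
  ListSeparator LS;
  std::cout << "[";
  for (const char *BB : {"entry", "loop", "exit"})
    std::cout << LS << BB;
  std::cout << "]\n"; // prints: [entry, loop, exit]
}
```
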
diff --git a/src/llvm-project/llvm/lib/Analysis/TFUtils.cpp b/src/llvm-project/llvm/lib/Analysis/TFUtils.cpp
index 1377cac..e93dc30 100644
--- a/src/llvm-project/llvm/lib/Analysis/TFUtils.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/TFUtils.cpp
@@ -15,6 +15,7 @@
 
 #include "llvm/ADT/Twine.h"
 #include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/JSON.h"
 #include "llvm/Support/ManagedStatic.h"
@@ -22,14 +23,22 @@
 #include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
 
+#include "google/protobuf/text_format.h"
 #include "tensorflow/c/c_api.h"
 #include "tensorflow/c/c_api_experimental.h"
-
+#include "tensorflow/core/example/example.pb.h"
 #include <cassert>
 #include <numeric>
 
 using namespace llvm;
 
+using google::protobuf::Message;
+using google::protobuf::TextFormat;
+
+static cl::opt<bool>
+    ProtobufTextMode("tfutils-text-log", cl::init(false), cl::Hidden,
+                     cl::desc("Output textual (human-readable) protobuf."));
+
 namespace {
 
 using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
@@ -64,89 +73,6 @@
 TFSessionOptionsPtr createTFSessionOptions() {
   return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
 }
-
-/// Write the values of one tensor as a list.
-template <typename T>
-void writeTensorValues(raw_ostream &OutFile, const char *TensorData,
-                       size_t ElemCount) {
-  OutFile << "[";
-  const T *TypedData = reinterpret_cast<const T *>(TensorData);
-  for (size_t I = 0; I < ElemCount; ++I) {
-    if (I > 0)
-      OutFile << ", ";
-    OutFile << TypedData[I];
-  }
-  OutFile << "]";
-}
-
-/// Write a list of tensors as a sequence of TensorFlow FeatureList protobufs.
-/// The tensors are assumed to be stored contiguously, in row-major format,
-/// in the TensorData buffer. Each tensor has the shape given by Spec. The
-/// feature name in the output is either the provided LoggingName, if
-/// specified, otherwise it's the name of the tensor (as given by Spec).
-void writeRawTensorsAsFeatureLists(raw_ostream &OutFile,
-                                   const LoggedFeatureSpec &LoggedSpec,
-                                   const char *TensorData, size_t TensorCount,
-                                   bool FinalReward = false) {
-  const char *FieldName = "<invalid>";
-  std::function<void(const char *)> ValueWriter;
-  const auto &Spec = LoggedSpec.Spec;
-  // The 'Feature' protobuf only has 3 possible fields: float_list,
-  // int64_list, or bytes_list, so we capture int32 values as int64. We don't
-  // support any other types.
-  if (Spec.isElementType<int64_t>()) {
-    FieldName = "int64_list";
-    ValueWriter = [&](const char *Data) {
-      writeTensorValues<int64_t>(OutFile, Data, Spec.getElementCount());
-    };
-  } else if (Spec.isElementType<int32_t>()) {
-    FieldName = "int64_list";
-    ValueWriter = [&](const char *Data) {
-      writeTensorValues<int32_t>(OutFile, Data, Spec.getElementCount());
-    };
-
-  } else if (Spec.isElementType<float>()) {
-    FieldName = "float_list";
-    ValueWriter = [&](const char *Data) {
-      writeTensorValues<float>(OutFile, Data, Spec.getElementCount());
-    };
-
-  } else {
-    llvm_unreachable("Unsupported tensor type.");
-  }
-
-  OutFile << "  feature_list: {\n";
-  OutFile << "    key: "
-          << "\""
-          << (LoggedSpec.LoggingName ? *LoggedSpec.LoggingName : Spec.name())
-          << "\" ";
-  OutFile << "value: {\n";
-  size_t TensorByteSize = Spec.getElementCount() * Spec.getElementByteSize();
-
-  auto WriteFeatureProto = [&](const char *P) {
-    OutFile << "      feature: { " << FieldName << ": { value: ";
-    ValueWriter(P);
-    OutFile << " } }\n";
-  };
-
-  const char *CurrentTensor = TensorData;
-  static int64_t Zero = 0;
-  // Write all but the last value. If this is the final reward, don't increment
-  // the CurrentTensor, and just write 0.
-  for (size_t I = 0; I < TensorCount - 1; ++I) {
-    if (FinalReward)
-      WriteFeatureProto(reinterpret_cast<const char *>(&Zero));
-    else {
-      WriteFeatureProto(CurrentTensor);
-      CurrentTensor += TensorByteSize;
-    }
-  }
-
-  WriteFeatureProto(CurrentTensor);
-
-  OutFile << "    }\n";
-  OutFile << "  }\n";
-}
 } // namespace
 
 namespace llvm {
@@ -332,6 +258,76 @@
   bool checkReportAndInvalidate(const TF_Output &Output,
                                 const TensorSpec &OutputSpec);
 };
+
+class LoggerDataImpl {
+  const std::vector<LoggedFeatureSpec> LoggedFeatureSpecs;
+  const TensorSpec RewardSpec;
+
+  tensorflow::SequenceExample SE;
+  std::vector<tensorflow::FeatureList *> FeatureLists;
+  tensorflow::FeatureList *Reward = nullptr;
+
+public:
+  LoggerDataImpl(const std::vector<LoggedFeatureSpec> &LoggedSpecs,
+                 const TensorSpec &RewardSpec, bool IncludeReward)
+      : LoggedFeatureSpecs(LoggedSpecs), RewardSpec(RewardSpec) {
+    auto *FL = SE.mutable_feature_lists()->mutable_feature_list();
+    if (IncludeReward)
+      Reward = &(*FL)[RewardSpec.name()];
+    // First allocate the map entries, then capture their addresses. We will
+    // not mutate the set of features after this (i.e. the pointers won't dangle).
+    for (const auto &LFS : LoggedSpecs) {
+      (*FL)[LFS.LoggingName ? *LFS.LoggingName : LFS.Spec.name()] = {};
+    }
+    for (const auto &LFS : LoggedSpecs)
+      FeatureLists.push_back(
+          &(*FL)[LFS.LoggingName ? *LFS.LoggingName : LFS.Spec.name()]);
+  }
+
+  void print(raw_ostream &OS) {
+    std::string OutStr;
+    if (ProtobufTextMode)
+      google::protobuf::TextFormat::PrintToString(SE, &OutStr);
+    else
+      OutStr = SE.SerializeAsString();
+
+    OS << OutStr;
+  }
+
+  char *addNewTensor(size_t FeatureID) {
+    const auto &Spec = LoggedFeatureSpecs[FeatureID].Spec;
+    if (Spec.isElementType<float>()) {
+      auto *RF = FeatureLists[FeatureID]
+                     ->add_feature()
+                     ->mutable_float_list()
+                     ->mutable_value();
+      RF->Resize(Spec.getElementCount(), 0.0);
+      return reinterpret_cast<char *>(RF->mutable_data());
+    } else if (Spec.isElementType<int32_t>() || Spec.isElementType<int64_t>()) {
+      auto *RF = FeatureLists[FeatureID]
+                     ->add_feature()
+                     ->mutable_int64_list()
+                     ->mutable_value();
+      RF->Resize(Spec.getElementCount(), 0);
+      return reinterpret_cast<char *>(RF->mutable_data());
+    }
+    llvm_unreachable("Unsupported tensor type.");
+  }
+
+  template <typename T> void logReward(T Value) {
+    if (RewardSpec.isElementType<float>())
+      Reward->add_feature()->mutable_float_list()->add_value(Value);
+    else if (RewardSpec.isElementType<int32_t>() ||
+             RewardSpec.isElementType<int64_t>())
+      Reward->add_feature()->mutable_int64_list()->add_value(Value);
+    else
+      llvm_unreachable("Unsupported tensor type.");
+  }
+
+  size_t getNrRecords() const {
+    return FeatureLists.empty() ? 0 : FeatureLists[0]->feature().size();
+  }
+};
 } // namespace llvm
 
 TFModelEvaluatorImpl::TFModelEvaluatorImpl(
@@ -476,30 +472,71 @@
 TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
 TFModelEvaluator::~TFModelEvaluator() {}
 
-void Logger::print(raw_ostream &OS) {
-  if (RawLogData.empty())
-    return;
-  if (RawLogData[0].empty())
-    return;
-  size_t Tensor0Size = FeatureSpecs[0].Spec.getElementCount() *
-                       FeatureSpecs[0].Spec.getElementByteSize();
-  size_t NumberOfRecords = RawLogData[0].size() / Tensor0Size;
-  if (NumberOfRecords == 0)
-    return;
-  size_t RewardSize =
-      RewardSpec.getElementCount() * RewardSpec.getElementByteSize();
-  size_t NumberOfRewards = RawLogData.back().size() / RewardSize;
+Logger::Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
+               const TensorSpec &RewardSpec, bool IncludeReward)
+    : FeatureSpecs(FeatureSpecs), RewardSpec(RewardSpec),
+      IncludeReward(IncludeReward),
+      LoggerData(std::make_unique<LoggerDataImpl>(FeatureSpecs, RewardSpec,
+                                                  IncludeReward)) {}
 
-  OS << "feature_lists: {\n";
-  for (size_t I = 0; I < FeatureSpecs.size(); ++I)
-    writeRawTensorsAsFeatureLists(OS, FeatureSpecs[I], RawLogData[I].data(),
-                                  NumberOfRecords);
+Logger::~Logger() {}
 
-  if (IncludeReward)
-    writeRawTensorsAsFeatureLists(OS, {RewardSpec, None},
-                                  RawLogData.back().data(), NumberOfRecords,
-                                  NumberOfRewards == 1);
+#define LOG_REWARD(NAME, TYPE)                                                 \
+  void Logger::log##NAME##Reward(TYPE Value) {                                 \
+    assert(IncludeReward);                                                     \
+    LoggerData->logReward(Value);                                              \
+  }
 
-  OS << "}\n";
+LOG_REWARD(Float, float)
+LOG_REWARD(Int32, int32_t)
+LOG_REWARD(Int64, int64_t)
+#undef LOG_REWARD
+
+#define LOG_FINAL_REWARD(NAME, TYPE)                                           \
+  void Logger::log##NAME##FinalReward(TYPE Value) {                            \
+    assert(RewardSpec.isElementType<TYPE>());                                  \
+    for (size_t I = 1; I < LoggerData->getNrRecords(); ++I)                    \
+      log##NAME##Reward(0);                                                    \
+    log##NAME##Reward(Value);                                                  \
+  }
+
+LOG_FINAL_REWARD(Float, float)
+LOG_FINAL_REWARD(Int32, int32_t)
+LOG_FINAL_REWARD(Int64, int64_t)
+#undef LOG_FINAL_REWARD
+
+void Logger::logFloatValue(size_t FeatureID, const float *Value) {
+  assert(FeatureSpecs[FeatureID].Spec.isElementType<float>());
+  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
 }
+
+void Logger::logInt64Value(size_t FeatureID, const int64_t *Value) {
+  assert(FeatureSpecs[FeatureID].Spec.isElementType<int64_t>());
+  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
+}
+
+void Logger::logInt32Value(size_t FeatureID, const int32_t *Value) {
+  assert(FeatureSpecs[FeatureID].Spec.isElementType<int32_t>());
+  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
+}
+
+void Logger::logSpecifiedTensorValue(size_t FeatureID, const char *RawData) {
+  const auto &Spec = FeatureSpecs[FeatureID].Spec;
+  char *Buff = addEntryAndGetFloatOrInt64Buffer(FeatureID);
+  if (Spec.isElementType<int32_t>())
+    for (size_t I = 0; I < Spec.getElementCount(); ++I)
+      (reinterpret_cast<int64_t *>(Buff))[I] =
+          static_cast<int64_t>((reinterpret_cast<const int32_t *>(RawData))[I]);
+  else if (Spec.isElementType<int64_t>() || Spec.isElementType<float>())
+    std::memcpy(Buff, RawData,
+                Spec.getElementCount() * Spec.getElementByteSize());
+  else
+    llvm_unreachable("Unsupported tensor type");
+}
+
+char *Logger::addEntryAndGetFloatOrInt64Buffer(size_t FeatureID) {
+  return reinterpret_cast<char *>(LoggerData->addNewTensor(FeatureID));
+}
+
+void Logger::print(raw_ostream &OS) { LoggerData->print(OS); }
 #endif // defined(LLVM_HAVE_TF_API)
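
`LoggerDataImpl` deliberately creates every `feature_list` map entry before capturing pointers, because protobuf's map type may move values as it grows. The same two-loop shape with `std::map` (whose references are always stable, so it only models the ordering, not the hazard):

```cpp
#include <map>
#include <string>
#include <vector>

int main() {
  std::map<std::string, std::vector<long>> FeatureList;
  std::vector<std::vector<long> *> Lists;
  const char *Names[] = {"feature0", "feature1", "reward"};
  for (const char *N : Names)
    FeatureList[N]; // allocate all entries first...
  for (const char *N : Names)
    Lists.push_back(&FeatureList[N]); // ...then capture their addresses
  Lists[0]->push_back(42); // logging a record never re-queries the map
  return FeatureList["feature0"].front() == 42 ? 0 : 1;
}
```
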
diff --git a/src/llvm-project/llvm/lib/Analysis/TargetLibraryInfo.cpp b/src/llvm-project/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 7e90e8c..fa5a5c5 100644
--- a/src/llvm-project/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -24,6 +24,8 @@
                           "No vector functions library"),
                clEnumValN(TargetLibraryInfoImpl::Accelerate, "Accelerate",
                           "Accelerate framework"),
+               clEnumValN(TargetLibraryInfoImpl::DarwinLibSystemM,
+                          "Darwin_libsystem_m", "Darwin libsystem_m"),
                clEnumValN(TargetLibraryInfoImpl::LIBMVEC_X86, "LIBMVEC-X86",
                           "GLIBC Vector Math library"),
                clEnumValN(TargetLibraryInfoImpl::MASSV, "MASSV",
@@ -65,6 +67,49 @@
   return TT.isOSFreeBSD() || TT.isOSSolaris();
 }
 
+static bool isCallingConvCCompatible(CallingConv::ID CC, StringRef TT,
+                                     FunctionType *FuncTy) {
+  switch (CC) {
+  default:
+    return false;
+  case llvm::CallingConv::C:
+    return true;
+  case llvm::CallingConv::ARM_APCS:
+  case llvm::CallingConv::ARM_AAPCS:
+  case llvm::CallingConv::ARM_AAPCS_VFP: {
+
+    // The iOS ABI diverges from the standard in some cases, so for now don't
+    // try to simplify those calls.
+    if (Triple(TT).isiOS())
+      return false;
+
+    if (!FuncTy->getReturnType()->isPointerTy() &&
+        !FuncTy->getReturnType()->isIntegerTy() &&
+        !FuncTy->getReturnType()->isVoidTy())
+      return false;
+
+    for (auto *Param : FuncTy->params()) {
+      if (!Param->isPointerTy() && !Param->isIntegerTy())
+        return false;
+    }
+    return true;
+  }
+  }
+  return false;
+}
+
+bool TargetLibraryInfoImpl::isCallingConvCCompatible(CallBase *CI) {
+  return ::isCallingConvCCompatible(CI->getCallingConv(),
+                                    CI->getModule()->getTargetTriple(),
+                                    CI->getFunctionType());
+}
+
+bool TargetLibraryInfoImpl::isCallingConvCCompatible(Function *F) {
+  return ::isCallingConvCCompatible(F->getCallingConv(),
+                                    F->getParent()->getTargetTriple(),
+                                    F->getFunctionType());
+}
+
 /// Initialize the set of available library functions based on the specified
 /// target triple. This should be carefully written so that a missing target
 /// triple gets a sane set of defaults.
@@ -106,6 +151,11 @@
   TLI.setShouldExtI32Return(ShouldExtI32Return);
   TLI.setShouldSignExtI32Param(ShouldSignExtI32Param);
 
+  // Assume by default that the size of int is 32 bits, unless the target is a
+  // 16-bit architecture, in which case it is most likely 16 bits. If that
+  // isn't true for a target, those defaults should be overridden below.
+  TLI.setIntSize(T.isArch16Bit() ? 16 : 32);
+
   if (T.isAMDGPU())
     TLI.disableAllFunctions();
 
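
The new default interacts with the prototype checks later in this file: for example, `ldexp`'s second parameter is now validated against `getIntSize()` rather than a hard-coded 32. A toy version of that dependency (helper names are illustrative):

```cpp
#include <cassert>

unsigned defaultIntSize(bool IsArch16Bit) { return IsArch16Bit ? 16 : 32; }

// ldexp(double, int): the second parameter must match the target's "int".
bool ldexpSecondParamOk(unsigned ParamBits, bool IsArch16Bit) {
  return ParamBits == defaultIntSize(IsArch16Bit);
}

int main() {
  assert(ldexpSecondParamOk(32, /*IsArch16Bit=*/false));
  assert(!ldexpSecondParamOk(32, /*IsArch16Bit=*/true)); // int is 16 bits
}
```
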
@@ -115,6 +165,8 @@
     TLI.setUnavailable(LibFunc_memcpy);
     TLI.setUnavailable(LibFunc_memset);
     TLI.setUnavailable(LibFunc_memset_pattern16);
+    TLI.setAvailable(llvm::LibFunc___kmpc_alloc_shared);
+    TLI.setAvailable(llvm::LibFunc___kmpc_free_shared);
     return;
   }
 
@@ -307,59 +359,63 @@
     // Win32 does not support these functions, but
     // they are generally available on POSIX-compliant systems.
     TLI.setUnavailable(LibFunc_access);
+    TLI.setUnavailable(LibFunc_chmod);
+    TLI.setUnavailable(LibFunc_closedir);
+    TLI.setUnavailable(LibFunc_fdopen);
+    TLI.setUnavailable(LibFunc_fileno);
+    TLI.setUnavailable(LibFunc_fseeko);
+    TLI.setUnavailable(LibFunc_fstat);
+    TLI.setUnavailable(LibFunc_ftello);
+    TLI.setUnavailable(LibFunc_gettimeofday);
+    TLI.setUnavailable(LibFunc_memccpy);
+    TLI.setUnavailable(LibFunc_mkdir);
+    TLI.setUnavailable(LibFunc_open);
+    TLI.setUnavailable(LibFunc_opendir);
+    TLI.setUnavailable(LibFunc_pclose);
+    TLI.setUnavailable(LibFunc_popen);
+    TLI.setUnavailable(LibFunc_read);
+    TLI.setUnavailable(LibFunc_rmdir);
+    TLI.setUnavailable(LibFunc_stat);
+    TLI.setUnavailable(LibFunc_strcasecmp);
+    TLI.setUnavailable(LibFunc_strncasecmp);
+    TLI.setUnavailable(LibFunc_unlink);
+    TLI.setUnavailable(LibFunc_utime);
+    TLI.setUnavailable(LibFunc_write);
+  }
+
+  if (T.isOSWindows() && !T.isWindowsCygwinEnvironment()) {
+    // These functions aren't available in either MSVC or MinGW environments.
     TLI.setUnavailable(LibFunc_bcmp);
     TLI.setUnavailable(LibFunc_bcopy);
     TLI.setUnavailable(LibFunc_bzero);
-    TLI.setUnavailable(LibFunc_chmod);
     TLI.setUnavailable(LibFunc_chown);
-    TLI.setUnavailable(LibFunc_closedir);
     TLI.setUnavailable(LibFunc_ctermid);
-    TLI.setUnavailable(LibFunc_fdopen);
     TLI.setUnavailable(LibFunc_ffs);
-    TLI.setUnavailable(LibFunc_fileno);
     TLI.setUnavailable(LibFunc_flockfile);
-    TLI.setUnavailable(LibFunc_fseeko);
-    TLI.setUnavailable(LibFunc_fstat);
     TLI.setUnavailable(LibFunc_fstatvfs);
-    TLI.setUnavailable(LibFunc_ftello);
     TLI.setUnavailable(LibFunc_ftrylockfile);
     TLI.setUnavailable(LibFunc_funlockfile);
     TLI.setUnavailable(LibFunc_getitimer);
     TLI.setUnavailable(LibFunc_getlogin_r);
     TLI.setUnavailable(LibFunc_getpwnam);
-    TLI.setUnavailable(LibFunc_gettimeofday);
     TLI.setUnavailable(LibFunc_htonl);
     TLI.setUnavailable(LibFunc_htons);
     TLI.setUnavailable(LibFunc_lchown);
     TLI.setUnavailable(LibFunc_lstat);
-    TLI.setUnavailable(LibFunc_memccpy);
-    TLI.setUnavailable(LibFunc_mkdir);
     TLI.setUnavailable(LibFunc_ntohl);
     TLI.setUnavailable(LibFunc_ntohs);
-    TLI.setUnavailable(LibFunc_open);
-    TLI.setUnavailable(LibFunc_opendir);
-    TLI.setUnavailable(LibFunc_pclose);
-    TLI.setUnavailable(LibFunc_popen);
     TLI.setUnavailable(LibFunc_pread);
     TLI.setUnavailable(LibFunc_pwrite);
-    TLI.setUnavailable(LibFunc_read);
     TLI.setUnavailable(LibFunc_readlink);
     TLI.setUnavailable(LibFunc_realpath);
-    TLI.setUnavailable(LibFunc_rmdir);
     TLI.setUnavailable(LibFunc_setitimer);
-    TLI.setUnavailable(LibFunc_stat);
     TLI.setUnavailable(LibFunc_statvfs);
     TLI.setUnavailable(LibFunc_stpcpy);
     TLI.setUnavailable(LibFunc_stpncpy);
-    TLI.setUnavailable(LibFunc_strcasecmp);
-    TLI.setUnavailable(LibFunc_strncasecmp);
     TLI.setUnavailable(LibFunc_times);
     TLI.setUnavailable(LibFunc_uname);
-    TLI.setUnavailable(LibFunc_unlink);
     TLI.setUnavailable(LibFunc_unsetenv);
-    TLI.setUnavailable(LibFunc_utime);
     TLI.setUnavailable(LibFunc_utimes);
-    TLI.setUnavailable(LibFunc_write);
   }
 
   switch (T.getOS()) {
@@ -547,6 +603,19 @@
   if (T.isNVPTX()) {
     TLI.disableAllFunctions();
     TLI.setAvailable(LibFunc_nvvm_reflect);
+    TLI.setAvailable(llvm::LibFunc_malloc);
+    TLI.setAvailable(llvm::LibFunc_free);
+
+    // TODO: We could enable the following two according to [0], but we
+    //       haven't evaluated the performance implications.
+    // [0]
+    // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#dynamic-global-memory-allocation-and-operations
+    //
+    //    TLI.setAvailable(llvm::LibFunc_memcpy);
+    //    TLI.setAvailable(llvm::LibFunc_memset);
+
+    TLI.setAvailable(llvm::LibFunc___kmpc_alloc_shared);
+    TLI.setAvailable(llvm::LibFunc___kmpc_free_shared);
   } else {
     TLI.setUnavailable(LibFunc_nvvm_reflect);
   }
@@ -579,7 +648,8 @@
 TargetLibraryInfoImpl::TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI)
     : CustomNames(TLI.CustomNames), ShouldExtI32Param(TLI.ShouldExtI32Param),
       ShouldExtI32Return(TLI.ShouldExtI32Return),
-      ShouldSignExtI32Param(TLI.ShouldSignExtI32Param) {
+      ShouldSignExtI32Param(TLI.ShouldSignExtI32Param),
+      SizeOfInt(TLI.SizeOfInt) {
   memcpy(AvailableArray, TLI.AvailableArray, sizeof(AvailableArray));
   VectorDescs = TLI.VectorDescs;
   ScalarDescs = TLI.ScalarDescs;
@@ -589,7 +659,8 @@
     : CustomNames(std::move(TLI.CustomNames)),
       ShouldExtI32Param(TLI.ShouldExtI32Param),
       ShouldExtI32Return(TLI.ShouldExtI32Return),
-      ShouldSignExtI32Param(TLI.ShouldSignExtI32Param) {
+      ShouldSignExtI32Param(TLI.ShouldSignExtI32Param),
+      SizeOfInt(TLI.SizeOfInt) {
   std::move(std::begin(TLI.AvailableArray), std::end(TLI.AvailableArray),
             AvailableArray);
   VectorDescs = TLI.VectorDescs;
@@ -601,6 +672,7 @@
   ShouldExtI32Param = TLI.ShouldExtI32Param;
   ShouldExtI32Return = TLI.ShouldExtI32Return;
   ShouldSignExtI32Param = TLI.ShouldSignExtI32Param;
+  SizeOfInt = TLI.SizeOfInt;
   memcpy(AvailableArray, TLI.AvailableArray, sizeof(AvailableArray));
   return *this;
 }
@@ -610,6 +682,7 @@
   ShouldExtI32Param = TLI.ShouldExtI32Param;
   ShouldExtI32Return = TLI.ShouldExtI32Return;
   ShouldSignExtI32Param = TLI.ShouldSignExtI32Param;
+  SizeOfInt = TLI.SizeOfInt;
   std::move(std::begin(TLI.AvailableArray), std::end(TLI.AvailableArray),
             AvailableArray);
   return *this;
@@ -645,7 +718,6 @@
                                                    LibFunc F,
                                                    const DataLayout *DL) const {
   LLVMContext &Ctx = FTy.getContext();
-  Type *PCharTy = Type::getInt8PtrTy(Ctx);
   Type *SizeTTy = DL ? DL->getIntPtrType(Ctx, /*AddressSpace=*/0) : nullptr;
   auto IsSizeTTy = [SizeTTy](Type *Ty) {
     return SizeTTy ? Ty == SizeTTy : Ty->isIntegerTy();
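
Dropping `PCharTy` here and in the hunks below relaxes exact `i8*` equality to "is some pointer", which keeps these checks meaningful as LLVM migrates toward opaque pointers; `size_t`-typed parameters likewise go through the `IsSizeTTy` predicate rather than exact type equality. A toy version of the relaxed `stpcpy` check, with types modeled as an enum (names illustrative):

```cpp
#include <cassert>

enum class Kind { Pointer, Int32, Int64, Float };

// stpcpy(char *, const char *): return and both params must be the same
// pointer type; with opaque pointers, "pointer at all" is the best we can ask.
bool isValidStpcpy(Kind Ret, Kind P0, Kind P1) {
  return Ret == P0 && P0 == P1 && P0 == Kind::Pointer;
}

int main() {
  assert(isValidStpcpy(Kind::Pointer, Kind::Pointer, Kind::Pointer));
  assert(!isValidStpcpy(Kind::Pointer, Kind::Pointer, Kind::Int64));
}
```
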
@@ -727,7 +799,7 @@
   case LibFunc_stpcpy:
     return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(0) &&
             FTy.getParamType(0) == FTy.getParamType(1) &&
-            FTy.getParamType(0) == PCharTy);
+            FTy.getParamType(0)->isPointerTy());
 
   case LibFunc_strlcat_chk:
   case LibFunc_strlcpy_chk:
@@ -752,7 +824,7 @@
   case LibFunc_stpncpy:
     return (NumParams == 3 && FTy.getReturnType() == FTy.getParamType(0) &&
             FTy.getParamType(0) == FTy.getParamType(1) &&
-            FTy.getParamType(0) == PCharTy &&
+            FTy.getParamType(0)->isPointerTy() &&
             IsSizeTTy(FTy.getParamType(2)));
 
   case LibFunc_strxfrm:
@@ -821,9 +893,10 @@
            FTy.getReturnType()->isIntegerTy(32);
 
   case LibFunc_snprintf:
-    return (NumParams == 3 && FTy.getParamType(0)->isPointerTy() &&
-            FTy.getParamType(2)->isPointerTy() &&
-            FTy.getReturnType()->isIntegerTy(32));
+    return NumParams == 3 && FTy.getParamType(0)->isPointerTy() &&
+           IsSizeTTy(FTy.getParamType(1)) &&
+           FTy.getParamType(2)->isPointerTy() &&
+           FTy.getReturnType()->isIntegerTy(32);
 
   case LibFunc_snprintf_chk:
     return NumParams == 5 && FTy.getParamType(0)->isPointerTy() &&
@@ -838,6 +911,7 @@
             FTy.getParamType(2)->isPointerTy());
   case LibFunc_system:
     return (NumParams == 1 && FTy.getParamType(0)->isPointerTy());
+  case LibFunc___kmpc_alloc_shared:
   case LibFunc_malloc:
   case LibFunc_vec_malloc:
     return (NumParams == 1 && FTy.getReturnType()->isPointerTy());
@@ -895,7 +969,7 @@
   case LibFunc_realloc:
   case LibFunc_reallocf:
   case LibFunc_vec_realloc:
-    return (NumParams == 2 && FTy.getReturnType() == PCharTy &&
+    return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
             FTy.getParamType(0) == FTy.getReturnType() &&
             IsSizeTTy(FTy.getParamType(1)));
   case LibFunc_read:
@@ -923,7 +997,8 @@
     return (NumParams == 2 && FTy.getParamType(0)->isPointerTy());
   case LibFunc_calloc:
   case LibFunc_vec_calloc:
-    return (NumParams == 2 && FTy.getReturnType()->isPointerTy());
+    return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
+            FTy.getParamType(0) == FTy.getParamType(1));
 
   case LibFunc_atof:
   case LibFunc_atoi:
@@ -975,6 +1050,9 @@
   case LibFunc_times:
   case LibFunc_vec_free:
     return (NumParams != 0 && FTy.getParamType(0)->isPointerTy());
+  case LibFunc___kmpc_free_shared:
+    return (NumParams == 2 && FTy.getParamType(0)->isPointerTy() &&
+            IsSizeTTy(FTy.getParamType(1)));
 
   case LibFunc_fopen:
     return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
@@ -1026,7 +1104,7 @@
   case LibFunc_getchar_unlocked:
     return (NumParams == 0 && FTy.getReturnType()->isIntegerTy());
   case LibFunc_gets:
-    return (NumParams == 1 && FTy.getParamType(0) == PCharTy);
+    return (NumParams == 1 && FTy.getParamType(0)->isPointerTy());
   case LibFunc_getitimer:
     return (NumParams == 2 && FTy.getParamType(1)->isPointerTy());
   case LibFunc_ungetc:
@@ -1439,7 +1517,7 @@
   case LibFunc_ldexpl:
     return (NumParams == 2 && FTy.getReturnType()->isFloatingPointTy() &&
             FTy.getReturnType() == FTy.getParamType(0) &&
-            FTy.getParamType(1)->isIntegerTy(32));
+            FTy.getParamType(1)->isIntegerTy(getIntSize()));
 
   case LibFunc_ffs:
   case LibFunc_ffsl:
@@ -1482,13 +1560,13 @@
 
   case LibFunc_strnlen:
     return (NumParams == 2 && FTy.getReturnType() == FTy.getParamType(1) &&
-            FTy.getParamType(0) == PCharTy &&
-            FTy.getParamType(1) == SizeTTy);
+            FTy.getParamType(0)->isPointerTy() &&
+            IsSizeTTy(FTy.getParamType(1)));
 
   case LibFunc_posix_memalign:
     return (NumParams == 3 && FTy.getReturnType()->isIntegerTy(32) &&
             FTy.getParamType(0)->isPointerTy() &&
-            FTy.getParamType(1) == SizeTTy && FTy.getParamType(2) == SizeTTy);
+            IsSizeTTy(FTy.getParamType(1)) && IsSizeTTy(FTy.getParamType(2)));
 
   case LibFunc_wcslen:
     return (NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
@@ -1515,10 +1593,9 @@
   }
 
   case LibFunc_rust_alloc:
-    return (NumParams == 3 && FTy.getReturnType()->isPointerTy() &&
+    return (NumParams == 2 && FTy.getReturnType()->isPointerTy() &&
             FTy.getParamType(0)->isIntegerTy() &&
-            FTy.getParamType(1)->isIntegerTy() &&
-            FTy.getParamType(2)->isPointerTy());
+            FTy.getParamType(1)->isIntegerTy());
 
   case LibFunc_rust_dealloc:
     return (NumParams == 3 && FTy.getReturnType()->isVoidTy() &&
@@ -1527,13 +1604,11 @@
             FTy.getParamType(2)->isIntegerTy());
 
   case LibFunc_rust_realloc:
-    return (NumParams == 6 && FTy.getReturnType()->isPointerTy() &&
+    return (NumParams == 4 && FTy.getReturnType()->isPointerTy() &&
             FTy.getParamType(0)->isPointerTy() &&
             FTy.getParamType(1)->isIntegerTy() &&
             FTy.getParamType(2)->isIntegerTy() &&
-            FTy.getParamType(3)->isIntegerTy() &&
-            FTy.getParamType(4)->isIntegerTy() &&
-            FTy.getParamType(5)->isPointerTy());
+            FTy.getParamType(3)->isIntegerTy());
 
   case LibFunc::NumLibFuncs:
   case LibFunc::NotLibFunc:
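
Written out as C-style declarations, these are the shapes the updated `rust_alloc`/`rust_realloc` checks accept. They appear to match rustc's global-allocator shims after the removal of the error-pointer out-parameters; this is an inference from the arities checked above, not an ABI reference:

```cpp
#include <cstddef>
#include <cstdint>

extern "C" {
// 2 params, pointer return: (size, align) -> ptr
uint8_t *__rust_alloc(size_t size, size_t align);
// 3 params, void return: (ptr, size, align)
void __rust_dealloc(uint8_t *ptr, size_t size, size_t align);
// 4 params, pointer return: (ptr, old_size, align, new_size) -> ptr
uint8_t *__rust_realloc(uint8_t *ptr, size_t old_size, size_t align,
                        size_t new_size);
}
```
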
@@ -1572,10 +1647,6 @@
   return LHS.ScalarFnName < S;
 }
 
-static bool compareWithVectorFnName(const VecDesc &LHS, StringRef S) {
-  return LHS.VectorFnName < S;
-}
-
 void TargetLibraryInfoImpl::addVectorizableFunctions(ArrayRef<VecDesc> Fns) {
   llvm::append_range(VectorDescs, Fns);
   llvm::sort(VectorDescs, compareByScalarFnName);
@@ -1595,6 +1666,14 @@
     addVectorizableFunctions(VecFuncs);
     break;
   }
+  case DarwinLibSystemM: {
+    const VecDesc VecFuncs[] = {
+    #define TLI_DEFINE_DARWIN_LIBSYSTEM_M_VECFUNCS
+    #include "llvm/Analysis/VecFuncs.def"
+    };
+    addVectorizableFunctions(VecFuncs);
+    break;
+  }
   case LIBMVEC_X86: {
     const VecDesc VecFuncs[] = {
     #define TLI_DEFINE_LIBMVEC_X86_VECFUNCS
@@ -1634,8 +1713,9 @@
   return I != VectorDescs.end() && StringRef(I->ScalarFnName) == funcName;
 }
 
-StringRef TargetLibraryInfoImpl::getVectorizedFunction(StringRef F,
-                                                       unsigned VF) const {
+StringRef
+TargetLibraryInfoImpl::getVectorizedFunction(StringRef F,
+                                             const ElementCount &VF) const {
   F = sanitizeFunctionName(F);
   if (F.empty())
     return F;
@@ -1649,20 +1729,6 @@
   return StringRef();
 }
 
-StringRef TargetLibraryInfoImpl::getScalarizedFunction(StringRef F,
-                                                       unsigned &VF) const {
-  F = sanitizeFunctionName(F);
-  if (F.empty())
-    return F;
-
-  std::vector<VecDesc>::const_iterator I =
-      llvm::lower_bound(ScalarDescs, F, compareWithVectorFnName);
-  if (I == VectorDescs.end() || StringRef(I->VectorFnName) != F)
-    return StringRef();
-  VF = I->VectorizationFactor;
-  return I->ScalarFnName;
-}
-
 TargetLibraryInfo TargetLibraryAnalysis::run(const Function &F,
                                              FunctionAnalysisManager &) {
   if (!BaselineInfoImpl)
@@ -1703,18 +1769,24 @@
 
 void TargetLibraryInfoWrapperPass::anchor() {}
 
-unsigned TargetLibraryInfoImpl::getWidestVF(StringRef ScalarF) const {
+void TargetLibraryInfoImpl::getWidestVF(StringRef ScalarF,
+                                        ElementCount &FixedVF,
+                                        ElementCount &ScalableVF) const {
   ScalarF = sanitizeFunctionName(ScalarF);
+  // Use '0' here because a type of the form <vscale x 1 x ElTy> is not the
+  // same as a scalar.
+  ScalableVF = ElementCount::getScalable(0);
+  FixedVF = ElementCount::getFixed(1);
   if (ScalarF.empty())
-    return 1;
+    return;
 
-  unsigned VF = 1;
   std::vector<VecDesc>::const_iterator I =
       llvm::lower_bound(VectorDescs, ScalarF, compareWithScalarFnName);
   while (I != VectorDescs.end() && StringRef(I->ScalarFnName) == ScalarF) {
-    if (I->VectorizationFactor > VF)
-      VF = I->VectorizationFactor;
+    ElementCount *VF =
+        I->VectorizationFactor.isScalable() ? &ScalableVF : &FixedVF;
+    if (ElementCount::isKnownGT(I->VectorizationFactor, *VF))
+      *VF = I->VectorizationFactor;
     ++I;
   }
-  return VF;
 }
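
`getWidestVF` now reports the widest fixed and widest scalable vectorization factors separately, since an `ElementCount` of one flavor is not comparable with the other. A toy model of that split (a sketch, not LLVM's full `ElementCount` API):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

// Counts with the same Scalable flag are comparable; the widest of each
// flavor is tracked independently.
struct EC {
  unsigned Min;
  bool Scalable;
};

void widestVF(const std::vector<EC> &Descs, EC &FixedVF, EC &ScalableVF) {
  FixedVF = {1, false};   // scalar baseline
  ScalableVF = {0, true}; // <vscale x 1 x T> is not a scalar, hence 0
  for (const EC &E : Descs) {
    EC &Slot = E.Scalable ? ScalableVF : FixedVF;
    Slot.Min = std::max(Slot.Min, E.Min);
  }
}

int main() {
  EC F, S;
  widestVF({{4, false}, {8, false}, {2, true}}, F, S);
  assert(F.Min == 8 && S.Min == 2);
}
```
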
diff --git a/src/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp b/src/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp
index e498401..304d24f 100644
--- a/src/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -54,86 +54,25 @@
   return true;
 }
 
-IntrinsicCostAttributes::IntrinsicCostAttributes(const IntrinsicInst &I) :
-    II(&I), RetTy(I.getType()), IID(I.getIntrinsicID()) {
-
- FunctionType *FTy = I.getCalledFunction()->getFunctionType();
- ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
- Arguments.insert(Arguments.begin(), I.arg_begin(), I.arg_end());
- if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
-   FMF = FPMO->getFastMathFlags();
-}
-
-IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
-                                                 const CallBase &CI) :
-  II(dyn_cast<IntrinsicInst>(&CI)),  RetTy(CI.getType()), IID(Id) {
+IntrinsicCostAttributes::IntrinsicCostAttributes(
+    Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost)
+    : II(dyn_cast<IntrinsicInst>(&CI)), RetTy(CI.getType()), IID(Id),
+      ScalarizationCost(ScalarizationCost) {
 
   if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
     FMF = FPMO->getFastMathFlags();
 
   Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
-  FunctionType *FTy =
-    CI.getCalledFunction()->getFunctionType();
+  FunctionType *FTy = CI.getCalledFunction()->getFunctionType();
   ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
 }
 
-IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
-                                                 const CallBase &CI,
-                                                 ElementCount Factor)
-    : RetTy(CI.getType()), IID(Id), VF(Factor) {
-
-  assert(!Factor.isScalable() && "Scalable vectors are not yet supported");
-  if (auto *FPMO = dyn_cast<FPMathOperator>(&CI))
-    FMF = FPMO->getFastMathFlags();
-
-  Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
-  FunctionType *FTy =
-    CI.getCalledFunction()->getFunctionType();
-  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
-}
-
-IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id,
-                                                 const CallBase &CI,
-                                                 ElementCount Factor,
-                                                 unsigned ScalarCost)
-    : RetTy(CI.getType()), IID(Id), VF(Factor), ScalarizationCost(ScalarCost) {
-
-  if (const auto *FPMO = dyn_cast<FPMathOperator>(&CI))
-    FMF = FPMO->getFastMathFlags();
-
-  Arguments.insert(Arguments.begin(), CI.arg_begin(), CI.arg_end());
-  FunctionType *FTy =
-    CI.getCalledFunction()->getFunctionType();
-  ParamTys.insert(ParamTys.begin(), FTy->param_begin(), FTy->param_end());
-}
-
-IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
-                                                 ArrayRef<Type *> Tys,
-                                                 FastMathFlags Flags) :
-    RetTy(RTy), IID(Id), FMF(Flags) {
-  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
-}
-
 IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
                                                  ArrayRef<Type *> Tys,
                                                  FastMathFlags Flags,
-                                                 unsigned ScalarCost) :
-    RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
-  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
-}
-
-IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
-                                                 ArrayRef<Type *> Tys,
-                                                 FastMathFlags Flags,
-                                                 unsigned ScalarCost,
-                                                 const IntrinsicInst *I) :
-    II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
-  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
-}
-
-IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
-                                                 ArrayRef<Type *> Tys) :
-    RetTy(RTy), IID(Id) {
+                                                 const IntrinsicInst *I,
+                                                 InstructionCost ScalarCost)
+    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
   ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
 }
 
@@ -147,6 +86,17 @@
     ParamTys.push_back(Arguments[Idx]->getType());
 }
 
+IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
+                                                 ArrayRef<const Value *> Args,
+                                                 ArrayRef<Type *> Tys,
+                                                 FastMathFlags Flags,
+                                                 const IntrinsicInst *I,
+                                                 InstructionCost ScalarCost)
+    : II(I), RetTy(RTy), IID(Id), FMF(Flags), ScalarizationCost(ScalarCost) {
+  ParamTys.insert(ParamTys.begin(), Tys.begin(), Tys.end());
+  Arguments.insert(Arguments.begin(), Args.begin(), Args.end());
+}
+
 bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
                                                LoopInfo &LI, DominatorTree &DT,
                                                bool ForceNestedLoop,
@@ -256,9 +206,10 @@
   return TTIImpl->getInlinerVectorBonusPercent();
 }
 
-int TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
-                                    ArrayRef<const Value *> Operands,
-                                    TTI::TargetCostKind CostKind) const {
+InstructionCost
+TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
+                                ArrayRef<const Value *> Operands,
+                                TTI::TargetCostKind CostKind) const {
   return TTIImpl->getGEPCost(PointeeType, Ptr, Operands, CostKind);
 }
 
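
The `int`-to-`InstructionCost` migration that dominates the rest of this file replaces magic negative values with a cost type carrying an explicit invalid state. A minimal stand-in showing the intended semantics (LLVM's real class lives in `Support/InstructionCost.h` and is richer; the ordering of invalid values here is an assumption):

```cpp
#include <cassert>

class Cost {
  long Value = 0;
  bool Valid = true;

public:
  Cost(long V) : Value(V) {}
  static Cost invalid() {
    Cost C(0);
    C.Valid = false;
    return C;
  }
  bool isValid() const { return Valid; }
  // Invalid compares greater-or-equal, so "Cost >= 0" asserts still hold.
  bool operator>=(long RHS) const { return !Valid || Value >= RHS; }
  Cost operator+(Cost RHS) const {
    if (!Valid || !RHS.Valid)
      return invalid(); // invalid propagates instead of going negative
    return Cost(Value + RHS.Value);
  }
};

int main() {
  Cost C = Cost(3) + Cost(4);
  assert(C.isValid() && C >= 0 && "TTI should not produce negative costs!");
  Cost I = Cost::invalid() + Cost(1);
  assert(!I.isValid());
}
```
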
@@ -268,15 +219,20 @@
   return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize, PSI, BFI);
 }
 
-int TargetTransformInfo::getUserCost(const User *U,
-                                     ArrayRef<const Value *> Operands,
-                                     enum TargetCostKind CostKind) const {
-  int Cost = TTIImpl->getUserCost(U, Operands, CostKind);
+InstructionCost
+TargetTransformInfo::getUserCost(const User *U,
+                                 ArrayRef<const Value *> Operands,
+                                 enum TargetCostKind CostKind) const {
+  InstructionCost Cost = TTIImpl->getUserCost(U, Operands, CostKind);
   assert((CostKind == TTI::TCK_RecipThroughput || Cost >= 0) &&
          "TTI should not produce negative costs!");
   return Cost;
 }
 
+BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const {
+  return TTIImpl->getPredictableBranchThreshold();
+}
+
 bool TargetTransformInfo::hasBranchDivergence() const {
   return TTIImpl->hasBranchDivergence();
 }
@@ -410,12 +366,10 @@
   return TTIImpl->canSaveCmp(L, BI, SE, LI, DT, AC, LibInfo);
 }
 
-bool TargetTransformInfo::shouldFavorPostInc() const {
-  return TTIImpl->shouldFavorPostInc();
-}
-
-bool TargetTransformInfo::shouldFavorBackedgeIndex(const Loop *L) const {
-  return TTIImpl->shouldFavorBackedgeIndex(L);
+TTI::AddressingModeKind
+TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
+                                                ScalarEvolution *SE) const {
+  return TTIImpl->getPreferredAddressingMode(L, SE);
 }
 
 bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
@@ -468,12 +422,11 @@
   return TTIImpl->prefersVectorizedAddressing();
 }
 
-int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
-                                              int64_t BaseOffset,
-                                              bool HasBaseReg, int64_t Scale,
-                                              unsigned AddrSpace) const {
-  int Cost = TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
-                                           Scale, AddrSpace);
+InstructionCost TargetTransformInfo::getScalingFactorCost(
+    Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, bool HasBaseReg,
+    int64_t Scale, unsigned AddrSpace) const {
+  InstructionCost Cost = TTIImpl->getScalingFactorCost(
+      Ty, BaseGV, BaseOffset, HasBaseReg, Scale, AddrSpace);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
@@ -496,32 +449,37 @@
   return TTIImpl->isTypeLegal(Ty);
 }
 
-unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const {
+InstructionCost TargetTransformInfo::getRegUsageForType(Type *Ty) const {
   return TTIImpl->getRegUsageForType(Ty);
 }
 
 bool TargetTransformInfo::shouldBuildLookupTables() const {
   return TTIImpl->shouldBuildLookupTables();
 }
+
 bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
     Constant *C) const {
   return TTIImpl->shouldBuildLookupTablesForConstant(C);
 }
 
+bool TargetTransformInfo::shouldBuildRelLookupTables() const {
+  return TTIImpl->shouldBuildRelLookupTables();
+}
+
 bool TargetTransformInfo::useColdCCForColdCall(Function &F) const {
   return TTIImpl->useColdCCForColdCall(F);
 }
 
-unsigned
+InstructionCost
 TargetTransformInfo::getScalarizationOverhead(VectorType *Ty,
                                               const APInt &DemandedElts,
                                               bool Insert, bool Extract) const {
   return TTIImpl->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
 }
 
-unsigned TargetTransformInfo::getOperandsScalarizationOverhead(
-    ArrayRef<const Value *> Args, unsigned VF) const {
-  return TTIImpl->getOperandsScalarizationOverhead(Args, VF);
+InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
+    ArrayRef<const Value *> Args, ArrayRef<Type *> Tys) const {
+  return TTIImpl->getOperandsScalarizationOverhead(Args, Tys);
 }
 
 bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const {
@@ -553,7 +511,7 @@
 bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                                          unsigned BitWidth,
                                                          unsigned AddressSpace,
-                                                         unsigned Alignment,
+                                                         Align Alignment,
                                                          bool *Fast) const {
   return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
                                                  AddressSpace, Alignment, Fast);
@@ -572,41 +530,44 @@
   return TTIImpl->isFCmpOrdCheaperThanFCmpZero(Ty);
 }
 
-int TargetTransformInfo::getFPOpCost(Type *Ty) const {
-  int Cost = TTIImpl->getFPOpCost(Ty);
+InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const {
+  InstructionCost Cost = TTIImpl->getFPOpCost(Ty);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
-                                               const APInt &Imm,
-                                               Type *Ty) const {
-  int Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
+InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
+                                                           unsigned Idx,
+                                                           const APInt &Imm,
+                                                           Type *Ty) const {
+  InstructionCost Cost = TTIImpl->getIntImmCodeSizeCost(Opcode, Idx, Imm, Ty);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
-                                       TTI::TargetCostKind CostKind) const {
-  int Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
+InstructionCost
+TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
+                                   TTI::TargetCostKind CostKind) const {
+  InstructionCost Cost = TTIImpl->getIntImmCost(Imm, Ty, CostKind);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getIntImmCostInst(unsigned Opcode, unsigned Idx,
-                                           const APInt &Imm, Type *Ty,
-                                           TTI::TargetCostKind CostKind,
-                                           Instruction *Inst) const {
-  int Cost = TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
+InstructionCost TargetTransformInfo::getIntImmCostInst(
+    unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
+    TTI::TargetCostKind CostKind, Instruction *Inst) const {
+  InstructionCost Cost =
+      TTIImpl->getIntImmCostInst(Opcode, Idx, Imm, Ty, CostKind, Inst);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int
+InstructionCost
 TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                          const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) const {
-  int Cost = TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
+  InstructionCost Cost =
+      TTIImpl->getIntImmCostIntrin(IID, Idx, Imm, Ty, CostKind);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
@@ -624,8 +585,9 @@
   return TTIImpl->getRegisterClassName(ClassID);
 }
 
-unsigned TargetTransformInfo::getRegisterBitWidth(bool Vector) const {
-  return TTIImpl->getRegisterBitWidth(Vector);
+TypeSize TargetTransformInfo::getRegisterBitWidth(
+    TargetTransformInfo::RegisterKind K) const {
+  return TTIImpl->getRegisterBitWidth(K);
 }
 
 unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const {
@@ -636,12 +598,13 @@
   return TTIImpl->getMaxVScale();
 }
 
-bool TargetTransformInfo::shouldMaximizeVectorBandwidth(bool OptSize) const {
-  return TTIImpl->shouldMaximizeVectorBandwidth(OptSize);
+bool TargetTransformInfo::shouldMaximizeVectorBandwidth() const {
+  return TTIImpl->shouldMaximizeVectorBandwidth();
 }
 
-unsigned TargetTransformInfo::getMinimumVF(unsigned ElemWidth) const {
-  return TTIImpl->getMinimumVF(ElemWidth);
+ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
+                                               bool IsScalable) const {
+  return TTIImpl->getMinimumVF(ElemWidth, IsScalable);
 }
 
 unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
@@ -742,22 +705,24 @@
   return OpInfo;
 }
 
-int TargetTransformInfo::getArithmeticInstrCost(
+InstructionCost TargetTransformInfo::getArithmeticInstrCost(
     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
-    OperandValueKind Opd1Info,
-    OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
-    OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
-    const Instruction *CxtI) const {
-  int Cost = TTIImpl->getArithmeticInstrCost(
-      Opcode, Ty, CostKind, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo,
-      Args, CxtI);
+    OperandValueKind Opd1Info, OperandValueKind Opd2Info,
+    OperandValueProperties Opd1PropInfo, OperandValueProperties Opd2PropInfo,
+    ArrayRef<const Value *> Args, const Instruction *CxtI) const {
+  InstructionCost Cost =
+      TTIImpl->getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
+                                      Opd1PropInfo, Opd2PropInfo, Args, CxtI);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getShuffleCost(ShuffleKind Kind, VectorType *Ty,
-                                        int Index, VectorType *SubTp) const {
-  int Cost = TTIImpl->getShuffleCost(Kind, Ty, Index, SubTp);
+InstructionCost TargetTransformInfo::getShuffleCost(ShuffleKind Kind,
+                                                    VectorType *Ty,
+                                                    ArrayRef<int> Mask,
+                                                    int Index,
+                                                    VectorType *SubTp) const {
+  InstructionCost Cost = TTIImpl->getShuffleCost(Kind, Ty, Mask, Index, SubTp);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
@@ -806,106 +771,106 @@
   return TTI::CastContextHint::None;
 }
 
-int TargetTransformInfo::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
-                                          CastContextHint CCH,
-                                          TTI::TargetCostKind CostKind,
-                                          const Instruction *I) const {
+InstructionCost TargetTransformInfo::getCastInstrCost(
+    unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
+    TTI::TargetCostKind CostKind, const Instruction *I) const {
   assert((I == nullptr || I->getOpcode() == Opcode) &&
          "Opcode should reflect passed instruction.");
-  int Cost = TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
+  InstructionCost Cost =
+      TTIImpl->getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
-                                                  VectorType *VecTy,
-                                                  unsigned Index) const {
-  int Cost = TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
+InstructionCost TargetTransformInfo::getExtractWithExtendCost(
+    unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const {
+  InstructionCost Cost =
+      TTIImpl->getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getCFInstrCost(unsigned Opcode,
-                                        TTI::TargetCostKind CostKind) const {
-  int Cost = TTIImpl->getCFInstrCost(Opcode, CostKind);
-  assert(Cost >= 0 && "TTI should not produce negative costs!");
-  return Cost;
-}
-
-int TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
-                                            Type *CondTy,
-                                            CmpInst::Predicate VecPred,
-                                            TTI::TargetCostKind CostKind,
-                                            const Instruction *I) const {
+InstructionCost TargetTransformInfo::getCFInstrCost(
+    unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const {
   assert((I == nullptr || I->getOpcode() == Opcode) &&
          "Opcode should reflect passed instruction.");
-  int Cost =
+  InstructionCost Cost = TTIImpl->getCFInstrCost(Opcode, CostKind, I);
+  assert(Cost >= 0 && "TTI should not produce negative costs!");
+  return Cost;
+}
+
+InstructionCost TargetTransformInfo::getCmpSelInstrCost(
+    unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
+    TTI::TargetCostKind CostKind, const Instruction *I) const {
+  assert((I == nullptr || I->getOpcode() == Opcode) &&
+         "Opcode should reflect passed instruction.");
+  InstructionCost Cost =
       TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
-                                            unsigned Index) const {
-  int Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
+InstructionCost TargetTransformInfo::getVectorInstrCost(unsigned Opcode,
+                                                        Type *Val,
+                                                        unsigned Index) const {
+  InstructionCost Cost = TTIImpl->getVectorInstrCost(Opcode, Val, Index);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getMemoryOpCost(unsigned Opcode, Type *Src,
-                                         Align Alignment, unsigned AddressSpace,
-                                         TTI::TargetCostKind CostKind,
-                                         const Instruction *I) const {
+InstructionCost TargetTransformInfo::getMemoryOpCost(
+    unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
+    TTI::TargetCostKind CostKind, const Instruction *I) const {
   assert((I == nullptr || I->getOpcode() == Opcode) &&
          "Opcode should reflect passed instruction.");
-  int Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
-                                      CostKind, I);
+  InstructionCost Cost = TTIImpl->getMemoryOpCost(Opcode, Src, Alignment,
+                                                  AddressSpace, CostKind, I);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getMaskedMemoryOpCost(
+InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
     unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
     TTI::TargetCostKind CostKind) const {
-  int Cost =
-      TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
-                                     CostKind);
+  InstructionCost Cost = TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment,
+                                                        AddressSpace, CostKind);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getGatherScatterOpCost(
+InstructionCost TargetTransformInfo::getGatherScatterOpCost(
     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const {
-  int Cost = TTIImpl->getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
-                                             Alignment, CostKind, I);
+  InstructionCost Cost = TTIImpl->getGatherScatterOpCost(
+      Opcode, DataTy, Ptr, VariableMask, Alignment, CostKind, I);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getInterleavedMemoryOpCost(
+InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
     bool UseMaskForCond, bool UseMaskForGaps) const {
-  int Cost = TTIImpl->getInterleavedMemoryOpCost(
+  InstructionCost Cost = TTIImpl->getInterleavedMemoryOpCost(
       Opcode, VecTy, Factor, Indices, Alignment, AddressSpace, CostKind,
       UseMaskForCond, UseMaskForGaps);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int
+InstructionCost
 TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                            TTI::TargetCostKind CostKind) const {
-  int Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
+  InstructionCost Cost = TTIImpl->getIntrinsicInstrCost(ICA, CostKind);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
-                                          ArrayRef<Type *> Tys,
-                                          TTI::TargetCostKind CostKind) const {
-  int Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
+InstructionCost
+TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
+                                      ArrayRef<Type *> Tys,
+                                      TTI::TargetCostKind CostKind) const {
+  InstructionCost Cost = TTIImpl->getCallInstrCost(F, RetTy, Tys, CostKind);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
@@ -914,36 +879,34 @@
   return TTIImpl->getNumberOfParts(Tp);
 }
 
-int TargetTransformInfo::getAddressComputationCost(Type *Tp,
-                                                   ScalarEvolution *SE,
-                                                   const SCEV *Ptr) const {
-  int Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
+InstructionCost
+TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
+                                               const SCEV *Ptr) const {
+  InstructionCost Cost = TTIImpl->getAddressComputationCost(Tp, SE, Ptr);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
-  int Cost = TTIImpl->getMemcpyCost(I);
+InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const {
+  InstructionCost Cost = TTIImpl->getMemcpyCost(I);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
 
-int TargetTransformInfo::getArithmeticReductionCost(unsigned Opcode,
-                                                    VectorType *Ty,
-                                                    bool IsPairwiseForm,
-                                                    TTI::TargetCostKind CostKind) const {
-  int Cost = TTIImpl->getArithmeticReductionCost(Opcode, Ty, IsPairwiseForm,
-                                                 CostKind);
-  assert(Cost >= 0 && "TTI should not produce negative costs!");
-  return Cost;
-}
-
-int TargetTransformInfo::getMinMaxReductionCost(
-    VectorType *Ty, VectorType *CondTy, bool IsPairwiseForm, bool IsUnsigned,
+InstructionCost TargetTransformInfo::getArithmeticReductionCost(
+    unsigned Opcode, VectorType *Ty, Optional<FastMathFlags> FMF,
     TTI::TargetCostKind CostKind) const {
-  int Cost =
-      TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsPairwiseForm, IsUnsigned,
-                                      CostKind);
+  InstructionCost Cost =
+      TTIImpl->getArithmeticReductionCost(Opcode, Ty, FMF, CostKind);
+  assert(Cost >= 0 && "TTI should not produce negative costs!");
+  return Cost;
+}
+
+InstructionCost TargetTransformInfo::getMinMaxReductionCost(
+    VectorType *Ty, VectorType *CondTy, bool IsUnsigned,
+    TTI::TargetCostKind CostKind) const {
+  InstructionCost Cost =
+      TTIImpl->getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
   assert(Cost >= 0 && "TTI should not produce negative costs!");
   return Cost;
 }
@@ -955,7 +918,7 @@
                                               CostKind);
 }
 
-unsigned
+InstructionCost
 TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
   return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
 }
@@ -1035,6 +998,15 @@
                                                AddrSpace);
 }
 
+bool TargetTransformInfo::isLegalToVectorizeReduction(
+    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
+  return TTIImpl->isLegalToVectorizeReduction(RdxDesc, VF);
+}
+
+bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const {
+  return TTIImpl->isElementTypeLegalForScalableVector(Ty);
+}
+
 unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
                                                   unsigned LoadSize,
                                                   unsigned ChainSizeInBytes,
@@ -1049,11 +1021,6 @@
   return TTIImpl->getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
 }
 
-bool TargetTransformInfo::useReductionIntrinsic(unsigned Opcode, Type *Ty,
-                                                ReductionFlags Flags) const {
-  return TTIImpl->useReductionIntrinsic(Opcode, Ty, Flags);
-}
-
 bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
                                                 ReductionFlags Flags) const {
   return TTIImpl->preferInLoopReduction(Opcode, Ty, Flags);
@@ -1064,6 +1031,11 @@
   return TTIImpl->preferPredicatedReductionSelect(Opcode, Ty, Flags);
 }
 
+TargetTransformInfo::VPLegalization
+TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const {
+  return TTIImpl->getVPLegalizationStrategy(VPI);
+}
+
 bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const {
   return TTIImpl->shouldExpandReduction(II);
 }
@@ -1076,296 +1048,17 @@
   return TTIImpl->supportsScalableVectors();
 }
 
-int TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
+bool TargetTransformInfo::hasActiveVectorLength() const {
+  return TTIImpl->hasActiveVectorLength();
+}
+
+InstructionCost
+TargetTransformInfo::getInstructionLatency(const Instruction *I) const {
   return TTIImpl->getInstructionLatency(I);
 }
 
-static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
-                                     unsigned Level) {
-  // We don't need a shuffle if we just want to have element 0 in position 0 of
-  // the vector.
-  if (!SI && Level == 0 && IsLeft)
-    return true;
-  else if (!SI)
-    return false;
-
-  SmallVector<int, 32> Mask(
-      cast<FixedVectorType>(SI->getType())->getNumElements(), -1);
-
-  // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
-  // we look at the left or right side.
-  for (unsigned i = 0, e = (1 << Level), val = !IsLeft; i != e; ++i, val += 2)
-    Mask[i] = val;
-
-  ArrayRef<int> ActualMask = SI->getShuffleMask();
-  return Mask == ActualMask;
-}
-
-static Optional<TTI::ReductionData> getReductionData(Instruction *I) {
-  Value *L, *R;
-  if (m_BinOp(m_Value(L), m_Value(R)).match(I))
-    return TTI::ReductionData(TTI::RK_Arithmetic, I->getOpcode(), L, R);
-  if (auto *SI = dyn_cast<SelectInst>(I)) {
-    if (m_SMin(m_Value(L), m_Value(R)).match(SI) ||
-        m_SMax(m_Value(L), m_Value(R)).match(SI) ||
-        m_OrdFMin(m_Value(L), m_Value(R)).match(SI) ||
-        m_OrdFMax(m_Value(L), m_Value(R)).match(SI) ||
-        m_UnordFMin(m_Value(L), m_Value(R)).match(SI) ||
-        m_UnordFMax(m_Value(L), m_Value(R)).match(SI)) {
-      auto *CI = cast<CmpInst>(SI->getCondition());
-      return TTI::ReductionData(TTI::RK_MinMax, CI->getOpcode(), L, R);
-    }
-    if (m_UMin(m_Value(L), m_Value(R)).match(SI) ||
-        m_UMax(m_Value(L), m_Value(R)).match(SI)) {
-      auto *CI = cast<CmpInst>(SI->getCondition());
-      return TTI::ReductionData(TTI::RK_UnsignedMinMax, CI->getOpcode(), L, R);
-    }
-  }
-  return llvm::None;
-}
-
-static TTI::ReductionKind matchPairwiseReductionAtLevel(Instruction *I,
-                                                        unsigned Level,
-                                                        unsigned NumLevels) {
-  // Match one level of pairwise operations.
-  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
-  //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
-  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
-  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
-  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
-  if (!I)
-    return TTI::RK_None;
-
-  assert(I->getType()->isVectorTy() && "Expecting a vector type");
-
-  Optional<TTI::ReductionData> RD = getReductionData(I);
-  if (!RD)
-    return TTI::RK_None;
-
-  ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(RD->LHS);
-  if (!LS && Level)
-    return TTI::RK_None;
-  ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(RD->RHS);
-  if (!RS && Level)
-    return TTI::RK_None;
-
-  // On level 0 we can omit one shufflevector instruction.
-  if (!Level && !RS && !LS)
-    return TTI::RK_None;
-
-  // Shuffle inputs must match.
-  Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
-  Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
-  Value *NextLevelOp = nullptr;
-  if (NextLevelOpR && NextLevelOpL) {
-    // If we have two shuffles their operands must match.
-    if (NextLevelOpL != NextLevelOpR)
-      return TTI::RK_None;
-
-    NextLevelOp = NextLevelOpL;
-  } else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
-    // On the first level we can omit the shufflevector <0, undef,...>. So the
-    // input to the other shufflevector <1, undef> must match with one of the
-    // inputs to the current binary operation.
-    // Example:
-    //  %NextLevelOpL = shufflevector %R, <1, undef ...>
-    //  %BinOp        = fadd          %NextLevelOpL, %R
-    if (NextLevelOpL && NextLevelOpL != RD->RHS)
-      return TTI::RK_None;
-    else if (NextLevelOpR && NextLevelOpR != RD->LHS)
-      return TTI::RK_None;
-
-    NextLevelOp = NextLevelOpL ? RD->RHS : RD->LHS;
-  } else
-    return TTI::RK_None;
-
-  // Check that the next levels binary operation exists and matches with the
-  // current one.
-  if (Level + 1 != NumLevels) {
-    if (!isa<Instruction>(NextLevelOp))
-      return TTI::RK_None;
-    Optional<TTI::ReductionData> NextLevelRD =
-        getReductionData(cast<Instruction>(NextLevelOp));
-    if (!NextLevelRD || !RD->hasSameData(*NextLevelRD))
-      return TTI::RK_None;
-  }
-
-  // Shuffle mask for pairwise operation must match.
-  if (matchPairwiseShuffleMask(LS, /*IsLeft=*/true, Level)) {
-    if (!matchPairwiseShuffleMask(RS, /*IsLeft=*/false, Level))
-      return TTI::RK_None;
-  } else if (matchPairwiseShuffleMask(RS, /*IsLeft=*/true, Level)) {
-    if (!matchPairwiseShuffleMask(LS, /*IsLeft=*/false, Level))
-      return TTI::RK_None;
-  } else {
-    return TTI::RK_None;
-  }
-
-  if (++Level == NumLevels)
-    return RD->Kind;
-
-  // Match next level.
-  return matchPairwiseReductionAtLevel(dyn_cast<Instruction>(NextLevelOp), Level,
-                                       NumLevels);
-}
-
-TTI::ReductionKind TTI::matchPairwiseReduction(
-  const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty) {
-  if (!EnableReduxCost)
-    return TTI::RK_None;
-
-  // Need to extract the first element.
-  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
-  unsigned Idx = ~0u;
-  if (CI)
-    Idx = CI->getZExtValue();
-  if (Idx != 0)
-    return TTI::RK_None;
-
-  auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
-  if (!RdxStart)
-    return TTI::RK_None;
-  Optional<TTI::ReductionData> RD = getReductionData(RdxStart);
-  if (!RD)
-    return TTI::RK_None;
-
-  auto *VecTy = cast<FixedVectorType>(RdxStart->getType());
-  unsigned NumVecElems = VecTy->getNumElements();
-  if (!isPowerOf2_32(NumVecElems))
-    return TTI::RK_None;
-
-  // We look for a sequence of shuffle,shuffle,add triples like the following
-  // that builds a pairwise reduction tree.
-  //
-  //  (X0, X1, X2, X3)
-  //   (X0 + X1, X2 + X3, undef, undef)
-  //    ((X0 + X1) + (X2 + X3), undef, undef, undef)
-  //
-  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
-  //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
-  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
-  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
-  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
-  // %rdx.shuf.1.0 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
-  //       <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
-  // %rdx.shuf.1.1 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
-  //       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-  // %bin.rdx8 = fadd <4 x float> %rdx.shuf.1.0, %rdx.shuf.1.1
-  // %r = extractelement <4 x float> %bin.rdx8, i32 0
-  if (matchPairwiseReductionAtLevel(RdxStart, 0, Log2_32(NumVecElems)) ==
-      TTI::RK_None)
-    return TTI::RK_None;
-
-  Opcode = RD->Opcode;
-  Ty = VecTy;
-
-  return RD->Kind;
-}
-
-static std::pair<Value *, ShuffleVectorInst *>
-getShuffleAndOtherOprd(Value *L, Value *R) {
-  ShuffleVectorInst *S = nullptr;
-
-  if ((S = dyn_cast<ShuffleVectorInst>(L)))
-    return std::make_pair(R, S);
-
-  S = dyn_cast<ShuffleVectorInst>(R);
-  return std::make_pair(L, S);
-}
-
-TTI::ReductionKind TTI::matchVectorSplittingReduction(
-  const ExtractElementInst *ReduxRoot, unsigned &Opcode, VectorType *&Ty) {
-
-  if (!EnableReduxCost)
-    return TTI::RK_None;
-
-  // Need to extract the first element.
-  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
-  unsigned Idx = ~0u;
-  if (CI)
-    Idx = CI->getZExtValue();
-  if (Idx != 0)
-    return TTI::RK_None;
-
-  auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
-  if (!RdxStart)
-    return TTI::RK_None;
-  Optional<TTI::ReductionData> RD = getReductionData(RdxStart);
-  if (!RD)
-    return TTI::RK_None;
-
-  auto *VecTy = cast<FixedVectorType>(ReduxRoot->getOperand(0)->getType());
-  unsigned NumVecElems = VecTy->getNumElements();
-  if (!isPowerOf2_32(NumVecElems))
-    return TTI::RK_None;
-
-  // We look for a sequence of shuffles and adds like the following matching one
-  // fadd, shuffle vector pair at a time.
-  //
-  // %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
-  //                           <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
-  // %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
-  // %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
-  //                          <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-  // %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
-  // %r = extractelement <4 x float> %bin.rdx8, i32 0
-
-  unsigned MaskStart = 1;
-  Instruction *RdxOp = RdxStart;
-  SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
-  unsigned NumVecElemsRemain = NumVecElems;
-  while (NumVecElemsRemain - 1) {
-    // Check for the right reduction operation.
-    if (!RdxOp)
-      return TTI::RK_None;
-    Optional<TTI::ReductionData> RDLevel = getReductionData(RdxOp);
-    if (!RDLevel || !RDLevel->hasSameData(*RD))
-      return TTI::RK_None;
-
-    Value *NextRdxOp;
-    ShuffleVectorInst *Shuffle;
-    std::tie(NextRdxOp, Shuffle) =
-        getShuffleAndOtherOprd(RDLevel->LHS, RDLevel->RHS);
-
-    // Check the current reduction operation and the shuffle use the same value.
-    if (Shuffle == nullptr)
-      return TTI::RK_None;
-    if (Shuffle->getOperand(0) != NextRdxOp)
-      return TTI::RK_None;
-
-    // Check that shuffle masks matches.
-    for (unsigned j = 0; j != MaskStart; ++j)
-      ShuffleMask[j] = MaskStart + j;
-    // Fill the rest of the mask with -1 for undef.
-    std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);
-
-    ArrayRef<int> Mask = Shuffle->getShuffleMask();
-    if (ShuffleMask != Mask)
-      return TTI::RK_None;
-
-    RdxOp = dyn_cast<Instruction>(NextRdxOp);
-    NumVecElemsRemain /= 2;
-    MaskStart *= 2;
-  }
-
-  Opcode = RD->Opcode;
-  Ty = VecTy;
-  return RD->Kind;
-}
-
-TTI::ReductionKind
-TTI::matchVectorReduction(const ExtractElementInst *Root, unsigned &Opcode,
-                          VectorType *&Ty, bool &IsPairwise) {
-  TTI::ReductionKind RdxKind = matchVectorSplittingReduction(Root, Opcode, Ty);
-  if (RdxKind != TTI::ReductionKind::RK_None) {
-    IsPairwise = false;
-    return RdxKind;
-  }
-  IsPairwise = true;
-  return matchPairwiseReduction(Root, Opcode, Ty);
-}
-
-int TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
+InstructionCost
+TargetTransformInfo::getInstructionThroughput(const Instruction *I) const {
   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
 
   switch (I->getOpcode()) {
@@ -1415,6 +1108,7 @@
   case Instruction::ExtractValue:
   case Instruction::ShuffleVector:
   case Instruction::Call:
+  case Instruction::Switch:
     return getUserCost(I, CostKind);
   default:
     // We don't have any information on this instruction.
diff --git a/src/llvm-project/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp b/src/llvm-project/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 268acb6..20d718f 100644
--- a/src/llvm-project/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -379,7 +379,7 @@
     return AAResultBase::alias(LocA, LocB, AAQI);
 
   // Otherwise return a definitive result.
-  return NoAlias;
+  return AliasResult::NoAlias;
 }
 
 bool TypeBasedAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
@@ -738,7 +738,7 @@
   AU.setPreservesAll();
 }
 
-MDNode *AAMDNodes::ShiftTBAA(MDNode *MD, size_t Offset) {
+MDNode *AAMDNodes::shiftTBAA(MDNode *MD, size_t Offset) {
   // Fast path if there's no offset
   if (Offset == 0)
     return MD;
@@ -746,50 +746,18 @@
   if (!isStructPathTBAA(MD))
     return MD;
 
-  TBAAStructTagNode Tag(MD);
-  SmallVector<Metadata *, 5> Sub;
-  Sub.push_back(MD->getOperand(0));
-  Sub.push_back(MD->getOperand(1));
-  ConstantInt *InnerOffset = mdconst::extract<ConstantInt>(MD->getOperand(2));
-
-  if (Tag.isNewFormat()) {
-    ConstantInt *InnerSize = mdconst::extract<ConstantInt>(MD->getOperand(3));
-
-    if (InnerOffset->getZExtValue() + InnerSize->getZExtValue() <= Offset) {
-      return nullptr;
-    }
-
-    uint64_t NewSize = InnerSize->getZExtValue();
-    uint64_t NewOffset = InnerOffset->getZExtValue() - Offset;
-    if (InnerOffset->getZExtValue() < Offset) {
-      NewOffset = 0;
-      NewSize -= Offset - InnerOffset->getZExtValue();
-    }
-
-    Sub.push_back(ConstantAsMetadata::get(
-        ConstantInt::get(InnerOffset->getType(), NewOffset)));
-
-    Sub.push_back(ConstantAsMetadata::get(
-        ConstantInt::get(InnerSize->getType(), NewSize)));
-
-    // immutable type
-    if (MD->getNumOperands() >= 5)
-      Sub.push_back(MD->getOperand(4));
-  } else {
-    if (InnerOffset->getZExtValue() < Offset)
-      return nullptr;
-
-    Sub.push_back(ConstantAsMetadata::get(ConstantInt::get(
-        InnerOffset->getType(), InnerOffset->getZExtValue() - Offset)));
-
-    // immutable type
-    if (MD->getNumOperands() >= 4)
-      Sub.push_back(MD->getOperand(3));
-  }
-  return MDNode::get(MD->getContext(), Sub);
+  // The correct behavior here is to add the offset into the TBAA
+  // struct node offset. The base type, however, may not have a type
+  // defined at this additional offset, resulting in errors. Since
+  // this method is only used within a given load/store access, the
+  // offset provided is only used to subdivide the previous load,
+  // maintaining the validity of the previous TBAA.
+  //
+  // This, however, should be revisited in the future.
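+  //
+  // For example, when a wide load of a struct is split into two narrower
+  // loads, both halves keep the original access's TBAA tag rather than a
+  // tag re-anchored at the shifted offset.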
+  return MD;
 }
 
-MDNode *AAMDNodes::ShiftTBAAStruct(MDNode *MD, size_t Offset) {
+MDNode *AAMDNodes::shiftTBAAStruct(MDNode *MD, size_t Offset) {
   // Fast path if there's no offset
   if (Offset == 0)
     return MD;
@@ -817,4 +785,4 @@
     Sub.push_back(MD->getOperand(i + 2));
   }
   return MDNode::get(MD->getContext(), Sub);
-}
\ No newline at end of file
+}
diff --git a/src/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp b/src/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp
index 8735d56..f015ba9 100644
--- a/src/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/TypeMetadataUtils.cpp
@@ -15,6 +15,7 @@
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/Module.h"
 
@@ -80,13 +81,9 @@
   const Module *M = CI->getParent()->getParent()->getParent();
 
   // Find llvm.assume intrinsics for this llvm.type.test call.
-  for (const Use &CIU : CI->uses()) {
-    if (auto *AssumeCI = dyn_cast<CallInst>(CIU.getUser())) {
-      Function *F = AssumeCI->getCalledFunction();
-      if (F && F->getIntrinsicID() == Intrinsic::assume)
-        Assumes.push_back(AssumeCI);
-    }
-  }
+  for (const Use &CIU : CI->uses())
+    if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
+      Assumes.push_back(Assume);
 
   // If we found any, search for virtual calls based on %p and add them to
   // DevirtCalls.
diff --git a/src/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp b/src/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp
index faa46537..8a34a34 100644
--- a/src/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/VFABIDemangling.cpp
@@ -454,7 +454,7 @@
   if (!M.getFunction(VectorName))
     return None;
 
-  const VFShape Shape({VF, IsScalable, Parameters});
+  const VFShape Shape({ElementCount::get(VF, IsScalable), Parameters});
   return VFInfo({Shape, std::string(ScalarName), std::string(VectorName), ISA});
 }
 
diff --git a/src/llvm-project/llvm/lib/Analysis/ValueTracking.cpp b/src/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
index 75486d3..522d218 100644
--- a/src/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/ValueTracking.cpp
@@ -26,6 +26,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/GuardUtils.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/Loads.h"
@@ -52,6 +53,7 @@
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Intrinsics.h"
 #include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
 #include "llvm/IR/IntrinsicsX86.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Metadata.h"
@@ -107,40 +109,13 @@
   // provide it currently.
   OptimizationRemarkEmitter *ORE;
 
-  /// Set of assumptions that should be excluded from further queries.
-  /// This is because of the potential for mutual recursion to cause
-  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
-  /// classic case of this is assume(x = y), which will attempt to determine
-  /// bits in x from bits in y, which will attempt to determine bits in y from
-  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
-  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
-  /// (all of which can call computeKnownBits), and so on.
-  std::array<const Value *, MaxAnalysisRecursionDepth> Excluded;
-
   /// If true, it is safe to use metadata during simplification.
   InstrInfoQuery IIQ;
 
-  unsigned NumExcluded = 0;
-
   Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
         const DominatorTree *DT, bool UseInstrInfo,
         OptimizationRemarkEmitter *ORE = nullptr)
       : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
-
-  Query(const Query &Q, const Value *NewExcl)
-      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
-        NumExcluded(Q.NumExcluded) {
-    Excluded = Q.Excluded;
-    Excluded[NumExcluded++] = NewExcl;
-    assert(NumExcluded <= Excluded.size());
-  }
-
-  bool isExcluded(const Value *Value) const {
-    if (NumExcluded == 0)
-      return false;
-    auto End = Excluded.begin() + NumExcluded;
-    return std::find(Excluded.begin(), End, Value) != End;
-  }
 };
 
 } // end anonymous namespace
@@ -161,6 +136,24 @@
   return nullptr;
 }
 
+static const Instruction *safeCxtI(const Value *V1, const Value *V2,
+                                   const Instruction *CxtI) {
+  // If we've been provided with a context instruction, then use that (provided
+  // it has been inserted).
+  if (CxtI && CxtI->getParent())
+    return CxtI;
+
+  // If the value is really an already-inserted instruction, then use that.
+  CxtI = dyn_cast<Instruction>(V1);
+  if (CxtI && CxtI->getParent())
+    return CxtI;
+
+  CxtI = dyn_cast<Instruction>(V2);
+  if (CxtI && CxtI->getParent())
+    return CxtI;
+
+  return nullptr;
+}
+
 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                    const APInt &DemandedElts,
                                    APInt &DemandedLHS, APInt &DemandedRHS) {
@@ -283,7 +276,7 @@
   KnownBits RHSKnown(IT->getBitWidth());
   computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
   computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
-  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
+  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
 }
 
 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
@@ -358,7 +351,7 @@
                            const Instruction *CxtI, const DominatorTree *DT,
                            bool UseInstrInfo) {
   return ::isKnownNonEqual(V1, V2, 0,
-                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
+                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                  UseInstrInfo, /*ORE=*/nullptr));
 }
 
@@ -444,7 +437,7 @@
     }
   }
 
-  Known = KnownBits::computeForMul(Known, Known2);
+  Known = KnownBits::mul(Known, Known2);
 
   // Only make use of no-wrap flags if we failed to compute the sign bit
   // directly.  This matters if the multiplication always overflows, in
@@ -519,27 +512,8 @@
 
 // Is this an intrinsic that cannot be speculated but also cannot trap?
 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
-  if (const CallInst *CI = dyn_cast<CallInst>(I))
-    if (Function *F = CI->getCalledFunction())
-      switch (F->getIntrinsicID()) {
-      default: break;
-      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
-      case Intrinsic::assume:
-      case Intrinsic::sideeffect:
-      case Intrinsic::pseudoprobe:
-      case Intrinsic::dbg_declare:
-      case Intrinsic::dbg_value:
-      case Intrinsic::dbg_label:
-      case Intrinsic::invariant_start:
-      case Intrinsic::invariant_end:
-      case Intrinsic::lifetime_start:
-      case Intrinsic::lifetime_end:
-      case Intrinsic::experimental_noalias_scope_decl:
-      case Intrinsic::objectsize:
-      case Intrinsic::ptr_annotation:
-      case Intrinsic::var_annotation:
-        return true;
-      }
+  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
+    return CI->isAssumeLikeIntrinsic();
 
   return false;
 }
@@ -570,8 +544,12 @@
     // The context comes first, but they're both in the same block.
     // Make sure there is nothing in between that might interrupt
     // the control flow, not even CxtI itself.
+    // We limit the scan distance between the assume and its context instruction
+    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
+    // it can be adjusted if needed (could be turned into a cl::opt).
+    unsigned ScanLimit = 15;
     for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
-      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
+      if (!isGuaranteedToTransferExecutionToSuccessor(&*I) || --ScanLimit == 0)
         return false;
 
     return !isEphemeralValueOf(Inv, CxtI);
@@ -629,8 +607,6 @@
     CallInst *I = cast<CallInst>(AssumeVH);
     assert(I->getFunction() == Q.CxtI->getFunction() &&
            "Got assumption for the wrong function!");
-    if (Q.isExcluded(I))
-      continue;
 
     // Warning: This loop can end up being somewhat performance sensitive.
     // We're running this loop once for each value queried, resulting in a
@@ -678,8 +654,6 @@
     CallInst *I = cast<CallInst>(AssumeVH);
     assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
            "Got assumption for the wrong function!");
-    if (Q.isExcluded(I))
-      continue;
 
     // Warning: This loop can end up being somewhat performance sensitive.
     // We're running this loop once for each value queried, resulting in a
@@ -710,6 +684,15 @@
     if (!Cmp)
       continue;
 
+    // We are attempting to compute known bits for the operands of an assume.
+    // Do not try to use other assumptions for those recursive calls because
+    // that can lead to mutual recursion and a compile-time explosion.
+    // An example of the mutual recursion: computeKnownBits can call
+    // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
+    // and so on.
+    Query QueryNoAC = Q;
+    QueryNoAC.AC = nullptr;
+
     // Note that ptrtoint may change the bitwidth.
     Value *A, *B;
     auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
@@ -724,7 +707,7 @@
       if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         Known.Zero |= RHSKnown.Zero;
         Known.One  |= RHSKnown.One;
       // assume(v & b = a)
@@ -732,9 +715,9 @@
                        m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         KnownBits MaskKnown =
-            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // For those bits in the mask that are known to be one, we can propagate
         // known bits from the RHS to V.
@@ -745,9 +728,9 @@
                                      m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         KnownBits MaskKnown =
-            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // For those bits in the mask that are known to be one, we can propagate
         // inverted known bits from the RHS to V.
@@ -758,9 +741,9 @@
                        m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         KnownBits BKnown =
-            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // For those bits in B that are known to be zero, we can propagate known
         // bits from the RHS to V.
@@ -771,9 +754,9 @@
                                      m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         KnownBits BKnown =
-            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // For those bits in B that are known to be zero, we can propagate
         // inverted known bits from the RHS to V.
@@ -784,9 +767,9 @@
                        m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         KnownBits BKnown =
-            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // For those bits in B that are known to be zero, we can propagate known
         // bits from the RHS to V. For those bits in B that are known to be one,
@@ -800,9 +783,9 @@
                                      m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         KnownBits BKnown =
-            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // For those bits in B that are known to be zero, we can propagate
         // inverted known bits from the RHS to V. For those bits in B that are
@@ -816,7 +799,7 @@
                                      m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // For those bits in RHS that are known, we can propagate them to known
         // bits in V shifted to the right by C.
@@ -829,7 +812,7 @@
                                      m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         // For those bits in RHS that are known, we can propagate them inverted
         // to known bits in V shifted to the right by C.
         RHSKnown.One.lshrInPlace(C);
@@ -841,7 +824,7 @@
                                      m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         // For those bits in RHS that are known, we can propagate them to known
         // bits in V shifted to the right by C.
         Known.Zero |= RHSKnown.Zero << C;
@@ -851,7 +834,7 @@
                                      m_Value(A))) &&
                  isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
         // For those bits in RHS that are known, we can propagate them inverted
         // to known bits in V shifted to the right by C.
         Known.Zero |= RHSKnown.One  << C;
@@ -863,7 +846,7 @@
       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         if (RHSKnown.isNonNegative()) {
           // We know that the sign bit is zero.
@@ -876,7 +859,7 @@
       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
           // We know that the sign bit is zero.
@@ -889,7 +872,7 @@
       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         if (RHSKnown.isNegative()) {
           // We know that the sign bit is one.
@@ -902,7 +885,7 @@
       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         if (RHSKnown.isZero() || RHSKnown.isNegative()) {
           // We know that the sign bit is one.
@@ -915,7 +898,7 @@
       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // Whatever high bits in c are zero are known to be zero.
         Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
@@ -926,7 +909,7 @@
       if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
           isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
         KnownBits RHSKnown =
-            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
+            computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
 
         // If the RHS is known zero, then this assumption must be wrong (nothing
         // is unsigned less than zero). Signal a conflict and get out of here.
@@ -938,7 +921,7 @@
 
         // Whatever high bits in c are zero are known to be zero (if c is a power
         // of 2, then one more).
-        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
+        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
         else
           Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
@@ -1200,6 +1183,47 @@
       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
       break;
     }
+
+    // Handle cast from vector integer type to scalar or vector integer.
+    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
+    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
+        !I->getType()->isIntOrIntVectorTy())
+      break;
+
+    // Look through a cast from narrow vector elements to wider type.
+    // Examples: v4i32 -> v2i64, v3i8 -> i24
+    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
+    if (BitWidth % SubBitWidth == 0) {
+      // Known bits are automatically intersected across demanded elements of a
+      // vector. So for example, if a bit is computed as known zero, it must be
+      // zero across all demanded elements of the vector.
+      //
+      // For this bitcast, each demanded element of the output is sub-divided
+      // across a set of smaller vector elements in the source vector. To get
+      // the known bits for an entire element of the output, compute the known
+      // bits for each sub-element sequentially. This is done by shifting the
+      // one-set-bit demanded elements parameter across the sub-elements for
+      // consecutive calls to computeKnownBits. We are using the demanded
+      // elements parameter as a mask operator.
+      //
+      // The known bits of each sub-element are then inserted into place
+      // (dependent on endian) to form the full result of known bits.
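+      //
+      // For example, a bitcast from <4 x i8> to <2 x i16> on a little-endian
+      // target computes the known bits of each i16 element from its two i8
+      // sub-elements, inserting them at bit offsets 0 and 8.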
+      unsigned NumElts = DemandedElts.getBitWidth();
+      unsigned SubScale = BitWidth / SubBitWidth;
+      APInt SubDemandedElts = APInt::getNullValue(NumElts * SubScale);
+      for (unsigned i = 0; i != NumElts; ++i) {
+        if (DemandedElts[i])
+          SubDemandedElts.setBit(i * SubScale);
+      }
+
+      KnownBits KnownSrc(SubBitWidth);
+      for (unsigned i = 0; i != SubScale; ++i) {
+        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
+                         Depth + 1, Q);
+        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
+        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
+      }
+    }
     break;
   }
   case Instruction::SExt: {
@@ -1229,6 +1253,10 @@
     };
     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                       KF);
+    // Trailing zeros of a left-shifted constant never decrease.
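+    // For example, a constant first operand of 0b...1000 keeps its low three
+    // bits zero in the shl result for any shift amount.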
+    const APInt *C;
+    if (match(I->getOperand(0), m_APInt(C)))
+      Known.Zero.setLowBits(C->countTrailingZeros());
     break;
   }
   case Instruction::LShr: {
@@ -1237,6 +1265,10 @@
     };
     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                       KF);
+    // Leading zeros of a right-shifted constant never decrease.
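+    // For example, a constant first operand of 0b0001... keeps its high three
+    // bits zero in the lshr result for any shift amount.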
+    const APInt *C;
+    if (match(I->getOperand(0), m_APInt(C)))
+      Known.Zero.setHighBits(C->countLeadingZeros());
     break;
   }
   case Instruction::AShr: {
@@ -1339,7 +1371,7 @@
         ScalingFactor =
             KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
       }
-      IndexBits = KnownBits::computeForMul(IndexBits, ScalingFactor);
+      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
 
       // If the offsets have a different width from the pointer, according
       // to the language reference we need to sign-extend or truncate them
@@ -1360,91 +1392,115 @@
   }
   case Instruction::PHI: {
     const PHINode *P = cast<PHINode>(I);
-    // Handle the case of a simple two-predecessor recurrence PHI.
-    // There's a lot more that could theoretically be done here, but
-    // this is sufficient to catch some interesting cases.
-    if (P->getNumIncomingValues() == 2) {
-      for (unsigned i = 0; i != 2; ++i) {
-        Value *L = P->getIncomingValue(i);
-        Value *R = P->getIncomingValue(!i);
-        Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
-        Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
-        Operator *LU = dyn_cast<Operator>(L);
-        if (!LU)
-          continue;
-        unsigned Opcode = LU->getOpcode();
-        // Check for operations that have the property that if
-        // both their operands have low zero bits, the result
-        // will have low zero bits.
-        if (Opcode == Instruction::Add ||
-            Opcode == Instruction::Sub ||
-            Opcode == Instruction::And ||
-            Opcode == Instruction::Or ||
-            Opcode == Instruction::Mul) {
-          Value *LL = LU->getOperand(0);
-          Value *LR = LU->getOperand(1);
-          // Find a recurrence.
-          if (LL == I)
-            L = LR;
-          else if (LR == I)
-            L = LL;
-          else
-            continue; // Check for recurrence with L and R flipped.
+    BinaryOperator *BO = nullptr;
+    Value *R = nullptr, *L = nullptr;
+    if (matchSimpleRecurrence(P, BO, R, L)) {
+      // Handle the case of a simple two-predecessor recurrence PHI.
+      // There's a lot more that could theoretically be done here, but
+      // this is sufficient to catch some interesting cases.
+      unsigned Opcode = BO->getOpcode();
 
-          // Change the context instruction to the "edge" that flows into the
-          // phi. This is important because that is where the value is actually
-          // "evaluated" even though it is used later somewhere else. (see also
-          // D69571).
-          Query RecQ = Q;
+      // If this is a shift recurrence, we know the bits being shifted in.
+      // We can combine that with information about the start value of the
+      // recurrence to conclude facts about the result.
+      if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
+           Opcode == Instruction::Shl) &&
+          BO->getOperand(0) == I) {
 
-          // Ok, we have a PHI of the form L op= R. Check for low
-          // zero bits.
-          RecQ.CxtI = RInst;
-          computeKnownBits(R, Known2, Depth + 1, RecQ);
+        // We have matched a recurrence of the form:
+        // %iv = phi [R, %entry], [%iv.next, %backedge]
+        // %iv.next = shift_op %iv, L
 
-          // We need to take the minimum number of known bits
-          KnownBits Known3(BitWidth);
-          RecQ.CxtI = LInst;
-          computeKnownBits(L, Known3, Depth + 1, RecQ);
+        // Recurse with the phi context to avoid concern about whether facts
+        // inferred hold at the original context instruction.  TODO: It may be
+        // correct to use the original context.  If warranted, explore and
+        // add sufficient tests to cover.
+        Query RecQ = Q;
+        RecQ.CxtI = P;
+        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
+        switch (Opcode) {
+        case Instruction::Shl:
+          // A shl recurrence will only increase the trailing zeros
+          Known.Zero.setLowBits(Known2.countMinTrailingZeros());
+          break;
+        case Instruction::LShr:
+          // A lshr recurrence will preserve the leading zeros of the
+          // start value
+          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
+          break;
+        case Instruction::AShr:
+          // An ashr recurrence will extend the initial sign bit
+          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
+          Known.One.setHighBits(Known2.countMinLeadingOnes());
+          break;
+        }
+      }
 
-          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
-                                         Known3.countMinTrailingZeros()));
+      // Check for operations that have the property that if
+      // both their operands have low zero bits, the result
+      // will have low zero bits.
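+      // For example, a phi starting at 8 whose backedge value is
+      // 'add %iv, 4' has min(cttz(8), cttz(4)) = 2 known low zero bits.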
+      if (Opcode == Instruction::Add ||
+          Opcode == Instruction::Sub ||
+          Opcode == Instruction::And ||
+          Opcode == Instruction::Or ||
+          Opcode == Instruction::Mul) {
+        // Change the context instruction to the "edge" that flows into the
+        // phi. This is important because that is where the value is actually
+        // "evaluated" even though it is used later somewhere else. (see also
+        // D69571).
+        Query RecQ = Q;
 
-          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
-          if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
-            // If initial value of recurrence is nonnegative, and we are adding
-            // a nonnegative number with nsw, the result can only be nonnegative
-            // or poison value regardless of the number of times we execute the
-            // add in phi recurrence. If initial value is negative and we are
-            // adding a negative number with nsw, the result can only be
-            // negative or poison value. Similar arguments apply to sub and mul.
-            //
-            // (add non-negative, non-negative) --> non-negative
-            // (add negative, negative) --> negative
-            if (Opcode == Instruction::Add) {
-              if (Known2.isNonNegative() && Known3.isNonNegative())
-                Known.makeNonNegative();
-              else if (Known2.isNegative() && Known3.isNegative())
-                Known.makeNegative();
-            }
+        unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
+        Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
+        Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();
 
-            // (sub nsw non-negative, negative) --> non-negative
-            // (sub nsw negative, non-negative) --> negative
-            else if (Opcode == Instruction::Sub && LL == I) {
-              if (Known2.isNonNegative() && Known3.isNegative())
-                Known.makeNonNegative();
-              else if (Known2.isNegative() && Known3.isNonNegative())
-                Known.makeNegative();
-            }
+        // Ok, we have a PHI of the form L op= R. Check for low
+        // zero bits.
+        RecQ.CxtI = RInst;
+        computeKnownBits(R, Known2, Depth + 1, RecQ);
 
-            // (mul nsw non-negative, non-negative) --> non-negative
-            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
-                     Known3.isNonNegative())
+        // We need to take the minimum number of known bits
+        KnownBits Known3(BitWidth);
+        RecQ.CxtI = LInst;
+        computeKnownBits(L, Known3, Depth + 1, RecQ);
+
+        Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
+                                       Known3.countMinTrailingZeros()));
+
+        auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
+        if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
+          // If initial value of recurrence is nonnegative, and we are adding
+          // a nonnegative number with nsw, the result can only be nonnegative
+          // or poison value regardless of the number of times we execute the
+          // add in phi recurrence. If initial value is negative and we are
+          // adding a negative number with nsw, the result can only be
+          // negative or poison value. Similar arguments apply to sub and mul.
+          //
+          // (add non-negative, non-negative) --> non-negative
+          // (add negative, negative) --> negative
+          if (Opcode == Instruction::Add) {
+            if (Known2.isNonNegative() && Known3.isNonNegative())
               Known.makeNonNegative();
+            else if (Known2.isNegative() && Known3.isNegative())
+              Known.makeNegative();
           }
 
-          break;
+          // (sub nsw non-negative, negative) --> non-negative
+          // (sub nsw negative, non-negative) --> negative
+          else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
+            if (Known2.isNonNegative() && Known3.isNegative())
+              Known.makeNonNegative();
+            else if (Known2.isNegative() && Known3.isNonNegative())
+              Known.makeNegative();
+          }
+
+          // (mul nsw non-negative, non-negative) --> non-negative
+          else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
+                   Known3.isNonNegative())
+            Known.makeNonNegative();
         }
+
+        break;
       }
     }
 
@@ -1626,6 +1682,13 @@
       case Intrinsic::x86_sse42_crc32_64_64:
         Known.Zero.setBitsFrom(32);
         break;
+      case Intrinsic::riscv_vsetvli:
+      case Intrinsic::riscv_vsetvlimax:
+        // Assume that VL output is positive and would fit in an int32_t.
+        // TODO: VLEN might be capped at 16 bits in a future V spec update.
+        if (BitWidth >= 32)
+          Known.Zero.setBitsFrom(31);
+        break;
       }
     }
     break;
@@ -1942,6 +2005,12 @@
     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
 
+  // Peek through min/max.
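+  // A min/max always selects one of its operands, so the result is a power
+  // of two if both operands are.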
+  if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
+    return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
+           isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
+  }
+
   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
     // A power of two and'd with anything is a power of two or zero.
     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
@@ -2172,6 +2241,36 @@
   return true;
 }
 
+/// Try to detect a recurrence that monotonically increases/decreases from a
+/// non-zero starting value. These are common as induction variables.
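+/// For example:
+///   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
+///   %iv.next = add nuw i32 %iv, 1
+/// starts non-zero and can never wrap back to zero.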
+static bool isNonZeroRecurrence(const PHINode *PN) {
+  BinaryOperator *BO = nullptr;
+  Value *Start = nullptr, *Step = nullptr;
+  const APInt *StartC, *StepC;
+  if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
+      !match(Start, m_APInt(StartC)) || StartC->isNullValue())
+    return false;
+
+  switch (BO->getOpcode()) {
+  case Instruction::Add:
+    // Starting from non-zero and stepping away from zero can never wrap back
+    // to zero.
+    return BO->hasNoUnsignedWrap() ||
+           (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
+            StartC->isNegative() == StepC->isNegative());
+  case Instruction::Mul:
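+    // A non-zero start times a non-zero step cannot become zero without
+    // wrapping, and the nuw/nsw flags make any wrap poison.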
+    return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
+           match(Step, m_APInt(StepC)) && !StepC->isNullValue();
+  case Instruction::Shl:
+    return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
+  case Instruction::AShr:
+  case Instruction::LShr:
+    return BO->isExact();
+  default:
+    return false;
+  }
+}
+
 /// Return true if the given value is known to be non-zero when defined. For
 /// vectors, return true if every demanded element is known to be non-zero when
 /// defined. For pointers, if the context instruction and dominator tree are
@@ -2413,24 +2512,9 @@
   }
   // PHI
   else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
-    // Try and detect a recurrence that monotonically increases from a
-    // starting value, as these are common as induction variables.
-    if (PN->getNumIncomingValues() == 2) {
-      Value *Start = PN->getIncomingValue(0);
-      Value *Induction = PN->getIncomingValue(1);
-      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
-        std::swap(Start, Induction);
-      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
-        if (!C->isZero() && !C->isNegative()) {
-          ConstantInt *X;
-          if (Q.IIQ.UseInstrInfo &&
-              (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
-               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
-              !X->isNegative())
-            return true;
-        }
-      }
-    }
+    if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
+      return true;
+
     // Check if all incoming values are non-zero using recursion.
     Query RecQ = Q;
     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
@@ -2479,6 +2563,113 @@
   return isKnownNonZero(V, DemandedElts, Depth, Q);
 }
 
+/// If the pair of operators are the same invertible function, return the
+/// operands of the function corresponding to each input. Otherwise,
+/// return None.  An invertible function is one that is 1-to-1 and maps
+/// every input value to exactly one output value.  This is equivalent to
+/// saying that Op1 and Op2 are equal exactly when the specified pair of
+/// operands are equal (except that Op1 and Op2 may be poison more often).
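+///
+/// For example, 'add %x, %c' and 'add %y, %c' are equal exactly when
+/// %x and %y are equal, so the pair {%x, %y} is returned.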
+static Optional<std::pair<Value*, Value*>>
+getInvertibleOperands(const Operator *Op1,
+                      const Operator *Op2) {
+  if (Op1->getOpcode() != Op2->getOpcode())
+    return None;
+
+  auto getOperands = [&](unsigned OpNum) -> auto {
+    return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
+  };
+
+  switch (Op1->getOpcode()) {
+  default:
+    break;
+  case Instruction::Add:
+  case Instruction::Sub:
+    if (Op1->getOperand(0) == Op2->getOperand(0))
+      return getOperands(1);
+    if (Op1->getOperand(1) == Op2->getOperand(1))
+      return getOperands(0);
+    break;
+  case Instruction::Mul: {
+    // invertible if A * B == (A * B) mod 2^N, where A and B are integers
+    // and N is the bitwidth.  The nsw case is non-obvious, but proven by
+    // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
+    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
+    auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
+    if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
+        (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
+      break;
+
+    // Assume operand order has been canonicalized
+    if (Op1->getOperand(1) == Op2->getOperand(1) &&
+        isa<ConstantInt>(Op1->getOperand(1)) &&
+        !cast<ConstantInt>(Op1->getOperand(1))->isZero())
+      return getOperands(0);
+    break;
+  }
+  case Instruction::Shl: {
+    // Same as multiplies, with the difference that we don't need to check
+    // for a non-zero multiply. Shifts always multiply by non-zero.
+    auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
+    auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
+    if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
+        (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
+      break;
+
+    if (Op1->getOperand(1) == Op2->getOperand(1))
+      return getOperands(0);
+    break;
+  }
+  case Instruction::AShr:
+  case Instruction::LShr: {
+    auto *PEO1 = cast<PossiblyExactOperator>(Op1);
+    auto *PEO2 = cast<PossiblyExactOperator>(Op2);
+    if (!PEO1->isExact() || !PEO2->isExact())
+      break;
+
+    if (Op1->getOperand(1) == Op2->getOperand(1))
+      return getOperands(0);
+    break;
+  }
+  case Instruction::SExt:
+  case Instruction::ZExt:
+    if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
+      return getOperands(0);
+    break;
+  case Instruction::PHI: {
+    const PHINode *PN1 = cast<PHINode>(Op1);
+    const PHINode *PN2 = cast<PHINode>(Op2);
+
+    // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
+    // are a single invertible function of the start values? Note that repeated
+    // application of an invertible function is also invertible
+    BinaryOperator *BO1 = nullptr;
+    Value *Start1 = nullptr, *Step1 = nullptr;
+    BinaryOperator *BO2 = nullptr;
+    Value *Start2 = nullptr, *Step2 = nullptr;
+    if (PN1->getParent() != PN2->getParent() ||
+        !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
+        !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
+      break;
+
+    auto Values = getInvertibleOperands(cast<Operator>(BO1),
+                                        cast<Operator>(BO2));
+    if (!Values)
+      break;
+
+    // We have to be careful of mutually defined recurrences here.  Ex:
+    // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
+    // * X_i = Y_i = X_(i-1) OP Y_(i-1)
+    // The invertibility of these is complicated, and not worth reasoning
+    // about (yet?).
+    if (Values->first != PN1 || Values->second != PN2)
+      break;
+
+    return std::make_pair(Start1, Start2);
+  }
+  }
+  return None;
+}
+
 /// Return true if V2 == V1 + X, where X is known non-zero.
 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q) {
@@ -2495,6 +2686,62 @@
   return isKnownNonZero(Op, Depth + 1, Q);
 }
 
+/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
+/// the multiplication is nuw or nsw.
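+/// For example, if %v1 is known non-zero, "%v2 = mul nsw i32 %v1, 3" implies
+/// %v1 != %v2: with no signed wrap, %v1 * 3 == %v1 would force %v1 == 0.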
+static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
+                          const Query &Q) {
+  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
+    const APInt *C;
+    return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
+           (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
+           !C->isNullValue() && !C->isOneValue() &&
+           isKnownNonZero(V1, Depth + 1, Q);
+  }
+  return false;
+}
+
+/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
+/// the shift is nuw or nsw.
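+/// For example, "%v2 = shl nuw i32 %v1, 1" with %v1 known non-zero implies
+/// %v1 != %v2.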
+static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
+                          const Query &Q) {
+  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
+    const APInt *C;
+    return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
+           (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
+           !C->isNullValue() && isKnownNonZero(V1, Depth + 1, Q);
+  }
+  return false;
+}
+
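+/// Return true if the values of the two given PHIs, which must be in the same
+/// block, are known to be unequal. Incoming values are compared pairwise per
+/// predecessor; at most one pair may require full recursion.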
+static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
+                           unsigned Depth, const Query &Q) {
+  // Check that the two PHIs are in the same block.
+  if (PN1->getParent() != PN2->getParent())
+    return false;
+
+  SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
+  bool UsedFullRecursion = false;
+  for (const BasicBlock *IncomBB : PN1->blocks()) {
+    if (!VisitedBBs.insert(IncomBB).second)
+      continue; // Don't reprocess blocks that we have dealt with already.
+    const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
+    const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
+    const APInt *C1, *C2;
+    if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
+      continue;
+
+    // Only one pair of phi operands is allowed for full recursion.
+    if (UsedFullRecursion)
+      return false;
+
+    Query RecQ = Q;
+    RecQ.CxtI = IncomBB->getTerminator();
+    if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
+      return false;
+    UsedFullRecursion = true;
+  }
+  return true;
+}
 
 /// Return true if it is known that V1 != V2.
 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
@@ -2514,48 +2761,27 @@
   auto *O1 = dyn_cast<Operator>(V1);
   auto *O2 = dyn_cast<Operator>(V2);
   if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
-    switch (O1->getOpcode()) {
-    default: break;
-    case Instruction::Add:
-    case Instruction::Sub:
-      // Assume operand order has been canonicalized
-      if (O1->getOperand(0) == O2->getOperand(0))
-        return isKnownNonEqual(O1->getOperand(1), O2->getOperand(1),
-                               Depth + 1, Q);
-      if (O1->getOperand(1) == O2->getOperand(1))
-        return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
-                               Depth + 1, Q);
-      break;
-    case Instruction::Mul: {
-      // invertible if A * B == (A * B) mod 2^N where A, and B are integers
-      // and N is the bitwdith.  The nsw case is non-obvious, but proven by
-      // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
-      auto *OBO1 = cast<OverflowingBinaryOperator>(O1);
-      auto *OBO2 = cast<OverflowingBinaryOperator>(O2);
-      if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
-          (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
-        break;
+    if (auto Values = getInvertibleOperands(O1, O2))
+      return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
 
-      // Assume operand order has been canonicalized
-      if (O1->getOperand(1) == O2->getOperand(1) &&
-          isa<ConstantInt>(O1->getOperand(1)) &&
-          !cast<ConstantInt>(O1->getOperand(1))->isZero())
-        return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
-                               Depth + 1, Q);
-      break;
-    }
-    case Instruction::SExt:
-    case Instruction::ZExt:
-      if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType())
-        return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
-                               Depth + 1, Q);
-      break;
+    if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
+      const PHINode *PN2 = cast<PHINode>(V2);
+      // FIXME: This is missing a generalization to handle the case where one
+      // is a PHI and the other isn't.
+      if (isNonEqualPHIs(PN1, PN2, Depth, Q))
+        return true;
     };
   }
-  
+
   if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
     return true;
 
+  if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
+    return true;
+
+  if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
+    return true;
+
   if (V1->getType()->isIntOrIntVectorTy()) {
     // Are any known bits in V1 contradictory to known bits in V2? If V1
     // has a known zero where V2 has a known one, they must not be equal.
@@ -2728,6 +2954,8 @@
     }
 
     case Instruction::SRem: {
+      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
+
       const APInt *Denominator;
       // srem X, C -> we know that the result is within [-C+1,C) when C is a
       // positive constant.  This let us put a lower bound on the number of sign
@@ -2735,30 +2963,25 @@
       if (match(U->getOperand(1), m_APInt(Denominator))) {
 
         // Ignore non-positive denominator.
-        if (!Denominator->isStrictlyPositive())
-          break;
+        if (Denominator->isStrictlyPositive()) {
+          // Calculate the leading sign bit constraints by examining the
+          // denominator.  Given that the denominator is positive, there are two
+          // cases:
+          //
+          //  1. The numerator is positive. The result range is [0,C) and
+          //     [0,C) u< (1 << ceilLogBase2(C)).
+          //
+          //  2. The numerator is negative. Then the result range is (-C,0] and
+          //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
+          //
+          // Thus a lower bound on the number of sign bits is `TyBits -
+          // ceilLogBase2(C)`.
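+          //
+          // For example, "srem i8 %x, 4" has a result in (-4, 4), giving at
+          // least 8 - ceilLogBase2(4) = 6 sign bits.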
 
-        // Calculate the incoming numerator bits. SRem by a positive constant
-        // can't lower the number of sign bits.
-        unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
-
-        // Calculate the leading sign bit constraints by examining the
-        // denominator.  Given that the denominator is positive, there are two
-        // cases:
-        //
-        //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
-        //     (1 << ceilLogBase2(C)).
-        //
-        //  2. the numerator is negative. Then the result range is (-C,0] and
-        //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
-        //
-        // Thus a lower bound on the number of sign bits is `TyBits -
-        // ceilLogBase2(C)`.
-
-        unsigned ResBits = TyBits - Denominator->ceilLogBase2();
-        return std::max(NumrBits, ResBits);
+          unsigned ResBits = TyBits - Denominator->ceilLogBase2();
+          Tmp = std::max(Tmp, ResBits);
+        }
       }
-      break;
+      return Tmp;
     }
 
     case Instruction::AShr: {
@@ -4130,18 +4353,18 @@
   return true;
 }
 
-Value *llvm::getUnderlyingObject(Value *V, unsigned MaxLookup) {
+const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
   if (!V->getType()->isPointerTy())
     return V;
   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
-    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
       V = GEP->getPointerOperand();
     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
       V = cast<Operator>(V)->getOperand(0);
       if (!V->getType()->isPointerTy())
         return V;
-    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
+    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
       if (GA->isInterposable())
         return V;
       V = GA->getAliasee();
@@ -4359,7 +4582,8 @@
 
 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                         const Instruction *CtxI,
-                                        const DominatorTree *DT) {
+                                        const DominatorTree *DT,
+                                        const TargetLibraryInfo *TLI) {
   const Operator *Inst = dyn_cast<Operator>(V);
   if (!Inst)
     return false;
@@ -4406,7 +4630,7 @@
     const DataLayout &DL = LI->getModule()->getDataLayout();
     return isDereferenceableAndAlignedPointer(
         LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
-        DL, CtxI, DT);
+        DL, CtxI, DT, TLI);
   }
   case Instruction::Call: {
     auto *CI = cast<const CallInst>(Inst);
@@ -4746,6 +4970,20 @@
     // destination type.
     return true;
   case Instruction::Call:
+    if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
+      switch (II->getIntrinsicID()) {
+      // TODO: Add more intrinsics.
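+      // The intrinsics below never introduce undef or poison themselves;
+      // their results are fully determined by their operands.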
+      case Intrinsic::ctpop:
+      case Intrinsic::sadd_with_overflow:
+      case Intrinsic::ssub_with_overflow:
+      case Intrinsic::smul_with_overflow:
+      case Intrinsic::uadd_with_overflow:
+      case Intrinsic::usub_with_overflow:
+      case Intrinsic::umul_with_overflow:
+        return false;
+      }
+    }
+    LLVM_FALLTHROUGH;
   case Instruction::CallBr:
   case Instruction::Invoke: {
     const auto *CB = cast<CallBase>(Op);
@@ -4814,11 +5052,24 @@
   if (Depth >= MaxDepth)
     return false;
 
-  const auto *I = dyn_cast<Instruction>(V);
-  if (I && propagatesPoison(cast<Operator>(I))) {
-    return any_of(I->operands(), [=](const Value *Op) {
-      return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
-    });
+  if (const auto *I = dyn_cast<Instruction>(V)) {
+    if (propagatesPoison(cast<Operator>(I)))
+      return any_of(I->operands(), [=](const Value *Op) {
+        return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
+      });
+
+    // 'select ValAssumedPoison, _, _' is poison.
+    if (const auto *SI = dyn_cast<SelectInst>(I))
+      return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
+                                   Depth + 1);
+    // V  = extractvalue V0, idx
+    // V2 = extractvalue V0, idx2
+    // V0's elements are all poison or all non-poison together (e.g., the
+    // aggregate returned by add_with_overflow).
+    const WithOverflowInst *II;
+    if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
+        (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
+         llvm::is_contained(II->arg_operands(), ValAssumedPoison)))
+      return true;
   }
   return false;
 }
@@ -5023,6 +5274,22 @@
   if (isa<UnreachableInst>(I))
     return false;
 
+  // Note: Do not add new checks here; instead, change Instruction::mayThrow or
+  // Instruction::willReturn.
+  //
+  // FIXME: Move this check into Instruction::willReturn.
+  if (isa<CatchPadInst>(I)) {
+    switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
+    default:
+      // A catchpad may invoke exception object constructors and such, which
+      // in some languages can be arbitrary code, so be conservative by default.
+      return false;
+    case EHPersonality::CoreCLR:
+      // For CoreCLR, it just involves a type test.
+      return true;
+    }
+  }
+
   // An instruction that returns without throwing must transfer control flow
   // to a successor.
   return !I->mayThrow() && I->willReturn();
@@ -5031,8 +5298,8 @@
 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
   // TODO: This is slightly conservative for invoke instruction since exiting
   // via an exception *is* normal control for them.
-  for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
-    if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
+  for (const Instruction &I : *BB)
+    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
       return false;
   return true;
 }
@@ -5057,9 +5324,27 @@
   case Instruction::Freeze:
   case Instruction::Select:
   case Instruction::PHI:
-  case Instruction::Call:
   case Instruction::Invoke:
     return false;
+  case Instruction::Call:
+    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
+      switch (II->getIntrinsicID()) {
+      // TODO: Add more intrinsics.
+      case Intrinsic::sadd_with_overflow:
+      case Intrinsic::ssub_with_overflow:
+      case Intrinsic::smul_with_overflow:
+      case Intrinsic::uadd_with_overflow:
+      case Intrinsic::usub_with_overflow:
+      case Intrinsic::umul_with_overflow:
+        // If an input is a vector containing a poison element, the
+        // corresponding lanes of the two output vectors (calculated results,
+        // overflow bits) are poison.
+        return true;
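+      // A poison lane in ctpop's input likewise yields a poison lane in its
+      // result.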
+      case Intrinsic::ctpop:
+        return true;
+      }
+    }
+    return false;
   case Instruction::ICmp:
   case Instruction::FCmp:
   case Instruction::GetElementPtr:
@@ -5073,8 +5358,8 @@
   }
 }
 
-void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
-                                     SmallPtrSetImpl<const Value *> &Operands) {
+void llvm::getGuaranteedWellDefinedOps(
+    const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
   switch (I->getOpcode()) {
     case Instruction::Store:
       Operands.insert(cast<StoreInst>(I)->getPointerOperand());
@@ -5084,6 +5369,8 @@
       Operands.insert(cast<LoadInst>(I)->getPointerOperand());
       break;
 
+    // Since the dereferenceable attribute implies noundef, atomic operations
+    // also implicitly have noundef pointers too.
     case Instruction::AtomicCmpXchg:
       Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
       break;
@@ -5092,20 +5379,14 @@
       Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
       break;
 
-    case Instruction::UDiv:
-    case Instruction::SDiv:
-    case Instruction::URem:
-    case Instruction::SRem:
-      Operands.insert(I->getOperand(1));
-      break;
-
     case Instruction::Call:
     case Instruction::Invoke: {
       const CallBase *CB = cast<CallBase>(I);
       if (CB->isIndirectCall())
         Operands.insert(CB->getCalledOperand());
       for (unsigned i = 0; i < CB->arg_size(); ++i) {
-        if (CB->paramHasAttr(i, Attribute::NoUndef))
+        if (CB->paramHasAttr(i, Attribute::NoUndef) ||
+            CB->paramHasAttr(i, Attribute::Dereferenceable))
           Operands.insert(CB->getArgOperand(i));
       }
       break;
@@ -5116,6 +5397,23 @@
   }
 }
 
+void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
+                                     SmallPtrSetImpl<const Value *> &Operands) {
+  getGuaranteedWellDefinedOps(I, Operands);
+  switch (I->getOpcode()) {
+  // Divisors of these operations are allowed to be partially undef.
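+  // For example, "udiv %x, (or undef, 1)" is not UB, since the divisor is
+  // non-zero no matter which value the undef takes, but a poison divisor
+  // is still immediate UB.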
+  case Instruction::UDiv:
+  case Instruction::SDiv:
+  case Instruction::URem:
+  case Instruction::SRem:
+    Operands.insert(I->getOperand(1));
+    break;
+
+  default:
+    break;
+  }
+}
+
 bool llvm::mustTriggerUB(const Instruction *I,
                          const SmallSet<const Value *, 16>& KnownPoison) {
   SmallPtrSet<const Value *, 4> NonPoisonOps;
@@ -5156,11 +5454,9 @@
   BasicBlock::const_iterator End = BB->end();
 
   if (!PoisonOnly) {
-    // Be conservative & just check whether a value is passed to a noundef
-    // argument.
-    // Instructions that raise UB with a poison operand are well-defined
-    // or have unclear semantics when the input is partially undef.
-    // For example, 'udiv x, (undef | 1)' isn't UB.
+    // Since undef does not propagate eagerly, be conservative & just check
+    // whether a value is directly passed to an instruction that must take
+    // well-defined operands.
 
     for (auto &I : make_range(Begin, End)) {
       if (isa<DbgInfoIntrinsic>(I))
@@ -5168,13 +5464,11 @@
       if (--ScanLimit == 0)
         break;
 
-      if (const auto *CB = dyn_cast<CallBase>(&I)) {
-        for (unsigned i = 0; i < CB->arg_size(); ++i) {
-          if (CB->paramHasAttr(i, Attribute::NoUndef) &&
-              CB->getArgOperand(i) == V)
-            return true;
-        }
-      }
+      SmallPtrSet<const Value *, 4> WellDefinedOps;
+      getGuaranteedWellDefinedOps(&I, WellDefinedOps);
+      if (WellDefinedOps.contains(V))
+        return true;
+
       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
         break;
     }
@@ -5210,16 +5504,12 @@
         for_each(I.users(), Propagate);
     }
 
-    if (auto *NextBB = BB->getSingleSuccessor()) {
-      if (Visited.insert(NextBB).second) {
-        BB = NextBB;
-        Begin = BB->getFirstNonPHI()->getIterator();
-        End = BB->end();
-        continue;
-      }
-    }
+    BB = BB->getSingleSuccessor();
+    if (!BB || !Visited.insert(BB).second)
+      break;
 
-    break;
+    Begin = BB->getFirstNonPHI()->getIterator();
+    End = BB->end();
   }
   return false;
 }
@@ -5949,6 +6239,16 @@
   llvm_unreachable("unhandled!");
 }
 
+Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
+  switch (MinMaxID) {
+  case Intrinsic::smax: return Intrinsic::smin;
+  case Intrinsic::smin: return Intrinsic::smax;
+  case Intrinsic::umax: return Intrinsic::umin;
+  case Intrinsic::umin: return Intrinsic::umax;
+  default: llvm_unreachable("Unexpected intrinsic");
+  }
+}
+
 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
   return getMinMaxPred(getInverseMinMaxFlavor(SPF));
 }
@@ -5993,6 +6293,71 @@
   return {Intrinsic::not_intrinsic, false};
 }
 
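+/// Attempt to match a simple first-order recurrence cycle of the form:
+///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
+///   %Inc = binop %iv, %Step
+/// (or with the binop operands swapped), returning the pieces through the
+/// out parameters.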
+bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
+                                 Value *&Start, Value *&Step) {
+  // Handle the case of a simple two-predecessor recurrence PHI.
+  // There's a lot more that could theoretically be done here, but
+  // this is sufficient to catch some interesting cases.
+  if (P->getNumIncomingValues() != 2)
+    return false;
+
+  for (unsigned i = 0; i != 2; ++i) {
+    Value *L = P->getIncomingValue(i);
+    Value *R = P->getIncomingValue(!i);
+    Operator *LU = dyn_cast<Operator>(L);
+    if (!LU)
+      continue;
+    unsigned Opcode = LU->getOpcode();
+
+    switch (Opcode) {
+    default:
+      continue;
+    // TODO: Expand list -- xor, div, gep, uaddo, etc.
+    case Instruction::LShr:
+    case Instruction::AShr:
+    case Instruction::Shl:
+    case Instruction::Add:
+    case Instruction::Sub:
+    case Instruction::And:
+    case Instruction::Or:
+    case Instruction::Mul: {
+      Value *LL = LU->getOperand(0);
+      Value *LR = LU->getOperand(1);
+      // Find a recurrence.
+      if (LL == P)
+        L = LR;
+      else if (LR == P)
+        L = LL;
+      else
+        continue; // Check for recurrence with L and R flipped.
+
+      break; // Match!
+    }
+    }
+
+    // We have matched a recurrence of the form:
+    //   %iv = [R, %entry], [%iv.next, %backedge]
+    //   %iv.next = binop %iv, L
+    // OR
+    //   %iv = [R, %entry], [%iv.next, %backedge]
+    //   %iv.next = binop L, %iv
+    BO = cast<BinaryOperator>(LU);
+    Start = R;
+    Step = L;
+    return true;
+  }
+  return false;
+}
+
+bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
+                                 Value *&Start, Value *&Step) {
+  BinaryOperator *BO = nullptr;
+  P = dyn_cast<PHINode>(I->getOperand(0));
+  if (!P)
+    P = dyn_cast<PHINode>(I->getOperand(1));
+  return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
+}
+
 /// Return true if "icmp Pred LHS RHS" is always true.
 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                             const Value *RHS, const DataLayout &DL,
@@ -6117,8 +6482,7 @@
                                  const ConstantInt *C2) {
   ConstantRange DomCR =
       ConstantRange::makeExactICmpRegion(APred, C1->getValue());
-  ConstantRange CR =
-      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
+  ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2->getValue());
   ConstantRange Intersection = DomCR.intersectWith(CR);
   ConstantRange Difference = DomCR.difference(CR);
   if (Intersection.isEmptySet())
diff --git a/src/llvm-project/llvm/lib/Analysis/VectorUtils.cpp b/src/llvm-project/llvm/lib/Analysis/VectorUtils.cpp
index 9a4c96b..0a14a14 100644
--- a/src/llvm-project/llvm/lib/Analysis/VectorUtils.cpp
+++ b/src/llvm-project/llvm/lib/Analysis/VectorUtils.cpp
@@ -114,6 +114,16 @@
   }
 }
 
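+/// Return true if the ScalarOpdIdx-th operand of the given vector intrinsic
+/// is a scalar operand that also participates in the intrinsic's overloaded
+/// type signature (e.g. the i32 exponent operand of llvm.powi).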
+bool llvm::hasVectorInstrinsicOverloadedScalarOpd(Intrinsic::ID ID,
+                                                  unsigned ScalarOpdIdx) {
+  switch (ID) {
+  case Intrinsic::powi:
+    return (ScalarOpdIdx == 1);
+  default:
+    return false;
+  }
+}
+
 /// Returns intrinsic ID for call.
 /// For the input call instruction it finds the mapping intrinsic and returns
 /// its ID; in case it does not find it, it returns not_intrinsic.
@@ -586,8 +596,8 @@
 
   for (auto I = ECs.begin(), E = ECs.end(); I != E; ++I) {
     uint64_t LeaderDemandedBits = 0;
-    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
-      LeaderDemandedBits |= DBits[*MI];
+    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
+      LeaderDemandedBits |= DBits[M];
 
     uint64_t MinBW = (sizeof(LeaderDemandedBits) * 8) -
                      llvm::countLeadingZeros(LeaderDemandedBits);
@@ -600,22 +610,22 @@
     // indvars.
     // If we are required to shrink a PHI, abandon this entire equivalence class.
     bool Abort = false;
-    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI)
-      if (isa<PHINode>(*MI) && MinBW < (*MI)->getType()->getScalarSizeInBits()) {
+    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end()))
+      if (isa<PHINode>(M) && MinBW < M->getType()->getScalarSizeInBits()) {
         Abort = true;
         break;
       }
     if (Abort)
       continue;
 
-    for (auto MI = ECs.member_begin(I), ME = ECs.member_end(); MI != ME; ++MI) {
-      if (!isa<Instruction>(*MI))
+    for (Value *M : llvm::make_range(ECs.member_begin(I), ECs.member_end())) {
+      if (!isa<Instruction>(M))
         continue;
-      Type *Ty = (*MI)->getType();
-      if (Roots.count(*MI))
-        Ty = cast<Instruction>(*MI)->getOperand(0)->getType();
+      Type *Ty = M->getType();
+      if (Roots.count(M))
+        Ty = cast<Instruction>(M)->getOperand(0)->getType();
       if (MinBW < Ty->getScalarSizeInBits())
-        MinBWs[cast<Instruction>(*MI)] = MinBW;
+        MinBWs[cast<Instruction>(M)] = MinBW;
     }
   }
 
@@ -708,6 +718,8 @@
 
 /// \returns \p I after propagating metadata from \p VL.
 Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) {
+  if (VL.empty())
+    return Inst;
   Instruction *I0 = cast<Instruction>(VL[0]);
   SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
   I0->getAllMetadataOtherThanDebugLoc(Metadata);
@@ -891,7 +903,6 @@
   return true;
 }
 
-
 bool llvm::maskIsAllOneOrUndef(Value *Mask) {
   assert(isa<VectorType>(Mask->getType()) &&
          isa<IntegerType>(Mask->getType()->getScalarType()) &&
@@ -957,12 +968,11 @@
   DFS.perform(LI);
   for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
     for (auto &I : *BB) {
-      auto *LI = dyn_cast<LoadInst>(&I);
-      auto *SI = dyn_cast<StoreInst>(&I);
-      if (!LI && !SI)
-        continue;
-
       Value *Ptr = getLoadStorePointerOperand(&I);
+      if (!Ptr)
+        continue;
+      Type *ElementTy = getLoadStoreType(&I);
+
       // We don't check wrapping here because we don't know yet if Ptr will be
       // part of a full group or a group with gaps. Checking wrapping for all
       // pointers (even those that end up in groups with no gaps) will be overly
@@ -974,8 +984,7 @@
                                     /*Assume=*/true, /*ShouldCheckWrap=*/false);
 
       const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
-      PointerType *PtrTy = cast<PointerType>(Ptr->getType());
-      uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
+      uint64_t Size = DL.getTypeAllocSize(ElementTy);
       AccessStrideInfo[&I] = StrideDescriptor(Stride, Scev, Size,
                                               getLoadStoreAlignment(&I));
     }
@@ -1300,10 +1309,14 @@
 
 std::string VFABI::mangleTLIVectorName(StringRef VectorName,
                                        StringRef ScalarName, unsigned numArgs,
-                                       unsigned VF) {
+                                       ElementCount VF) {
   SmallString<256> Buffer;
   llvm::raw_svector_ostream Out(Buffer);
-  Out << "_ZGV" << VFABI::_LLVM_ << "N" << VF;
+  Out << "_ZGV" << VFABI::_LLVM_ << "N";
+  if (VF.isScalable())
+    Out << 'x';
+  else
+    Out << VF.getFixedValue();
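+  // At this point the prefix is e.g. "_ZGV_LLVM_N4" for a fixed VF of 4, or
+  // "_ZGV_LLVM_Nx" for a scalable VF.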
   for (unsigned I = 0; I < numArgs; ++I)
     Out << "v";
   Out << "_" << ScalarName << "(" << VectorName << ")";
diff --git a/src/llvm-project/llvm/lib/Analysis/models/generate_mock_model.py b/src/llvm-project/llvm/lib/Analysis/models/generate_mock_model.py
new file mode 100644
index 0000000..f1170ab
--- /dev/null
+++ b/src/llvm-project/llvm/lib/Analysis/models/generate_mock_model.py
@@ -0,0 +1,69 @@
+"""Generate a mock model for LLVM tests.
+
+The generated model is not a neural net - it is just a tf.function with the
+correct input and output parameters. By construction, the mock model will always
+output 1.
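+
+Usage:
+  python3 generate_mock_model.py <config.py> <output model directory>
+where <config.py> must expose get_input_signature(), get_output_signature()
+and get_output_spec().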
+"""
+
+import os
+import importlib.util
+import sys
+
+import tensorflow as tf
+
+
+def get_output_spec_path(path):
+  return os.path.join(path, 'output_spec.json')
+
+
+def build_mock_model(path, signature):
+  """Build and save the mock model with the given signature"""
+  module = tf.Module()
+
+  # We have to set this otherwise-unused variable in order for the TF C API
+  # to correctly consume the saved model.
+  module.var = tf.Variable(0.)
+
+  def action(*inputs):
+    s = tf.reduce_sum([tf.cast(x, tf.float32) for x in tf.nest.flatten(inputs)])
+    return {signature['output']: float('inf') + s + module.var}
+
+  module.action = tf.function()(action)
+  action = {'action': module.action.get_concrete_function(signature['inputs'])}
+  tf.saved_model.save(module, path, signatures=action)
+
+  output_spec_path = get_output_spec_path(path)
+  with open(output_spec_path, 'w') as f:
+    print(f'Writing output spec to {output_spec_path}.')
+    f.write(signature['output_spec'])
+
+
+def get_external_signature(config_path):
+  """Get the signature for the desired model.
+
+  We manually import the python file at config_path to avoid adding a gin
+  dependency to the LLVM build.
+  """
+  spec = importlib.util.spec_from_file_location('config', config_path)
+  config = importlib.util.module_from_spec(spec)
+  spec.loader.exec_module(config)
+
+  return {
+      'inputs': config.get_input_signature(),
+      'output': config.get_output_signature(),
+      'output_spec': config.get_output_spec()
+  }
+
+
+def main(argv):
+  assert len(argv) == 3
+  config_path = argv[1]
+  model_path = argv[2]
+
+  print(f'Using config file at [{argv[1]}]')
+  signature = get_external_signature(config_path)
+  build_mock_model(model_path, signature)
+
+
+if __name__ == '__main__':
+  main(sys.argv)
diff --git a/src/llvm-project/llvm/lib/Analysis/models/inliner/README.txt b/src/llvm-project/llvm/lib/Analysis/models/inliner/README.txt
deleted file mode 100644
index a76bb89..0000000
--- a/src/llvm-project/llvm/lib/Analysis/models/inliner/README.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Reference model for inliner -Oz decision policy.
-Note that, currently, this model is also referenced by test/Transforms/Inline/ML
-tests - if replacing it, check those tests, too.
diff --git a/src/llvm-project/llvm/lib/Analysis/models/inliner/output_spec.json b/src/llvm-project/llvm/lib/Analysis/models/inliner/output_spec.json
deleted file mode 100644
index 5f9d13d..0000000
--- a/src/llvm-project/llvm/lib/Analysis/models/inliner/output_spec.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
-    {
-        "logging_name": "inlining_decision",
-        "tensor_spec": {
-            "name": "StatefulPartitionedCall",
-            "port": 0,
-            "type": "int64_t",
-            "shape": [
-                1
-            ]
-        }
-    }
-]
- 
\ No newline at end of file
diff --git a/src/llvm-project/llvm/lib/Analysis/models/inliner/saved_model.pbtxt b/src/llvm-project/llvm/lib/Analysis/models/inliner/saved_model.pbtxt
deleted file mode 100644
index ec522a8..0000000
--- a/src/llvm-project/llvm/lib/Analysis/models/inliner/saved_model.pbtxt
+++ /dev/null
@@ -1,32634 +0,0 @@
-saved_model_schema_version: 1
-meta_graphs {
-  meta_info_def {
-    stripped_op_list {
-      op {
-        name: "Const"
-        output_arg {
-          name: "output"
-          type_attr: "dtype"
-        }
-        attr {
-          name: "value"
-          type: "tensor"
-        }
-        attr {
-          name: "dtype"
-          type: "type"
-        }
-      }
-      op {
-        name: "NoOp"
-      }
-      op {
-        name: "PartitionedCall"
-        input_arg {
-          name: "args"
-          type_list_attr: "Tin"
-        }
-        output_arg {
-          name: "output"
-          type_list_attr: "Tout"
-        }
-        attr {
-          name: "Tin"
-          type: "list(type)"
-          has_minimum: true
-        }
-        attr {
-          name: "Tout"
-          type: "list(type)"
-          has_minimum: true
-        }
-        attr {
-          name: "f"
-          type: "func"
-        }
-        attr {
-          name: "config"
-          type: "string"
-          default_value {
-            s: ""
-          }
-        }
-        attr {
-          name: "config_proto"
-          type: "string"
-          default_value {
-            s: ""
-          }
-        }
-        attr {
-          name: "executor_type"
-          type: "string"
-          default_value {
-            s: ""
-          }
-        }
-      }
-      op {
-        name: "Placeholder"
-        output_arg {
-          name: "output"
-          type_attr: "dtype"
-        }
-        attr {
-          name: "dtype"
-          type: "type"
-        }
-        attr {
-          name: "shape"
-          type: "shape"
-          default_value {
-            shape {
-              unknown_rank: true
-            }
-          }
-        }
-      }
-      op {
-        name: "ReadVariableOp"
-        input_arg {
-          name: "resource"
-          type: DT_RESOURCE
-        }
-        output_arg {
-          name: "value"
-          type_attr: "dtype"
-        }
-        attr {
-          name: "dtype"
-          type: "type"
-        }
-        is_stateful: true
-      }
-      op {
-        name: "StatefulPartitionedCall"
-        input_arg {
-          name: "args"
-          type_list_attr: "Tin"
-        }
-        output_arg {
-          name: "output"
-          type_list_attr: "Tout"
-        }
-        attr {
-          name: "Tin"
-          type: "list(type)"
-          has_minimum: true
-        }
-        attr {
-          name: "Tout"
-          type: "list(type)"
-          has_minimum: true
-        }
-        attr {
-          name: "f"
-          type: "func"
-        }
-        attr {
-          name: "config"
-          type: "string"
-          default_value {
-            s: ""
-          }
-        }
-        attr {
-          name: "config_proto"
-          type: "string"
-          default_value {
-            s: ""
-          }
-        }
-        attr {
-          name: "executor_type"
-          type: "string"
-          default_value {
-            s: ""
-          }
-        }
-        is_stateful: true
-      }
-      op {
-        name: "VarHandleOp"
-        output_arg {
-          name: "resource"
-          type: DT_RESOURCE
-        }
-        attr {
-          name: "container"
-          type: "string"
-          default_value {
-            s: ""
-          }
-        }
-        attr {
-          name: "shared_name"
-          type: "string"
-          default_value {
-            s: ""
-          }
-        }
-        attr {
-          name: "dtype"
-          type: "type"
-        }
-        attr {
-          name: "shape"
-          type: "shape"
-        }
-        attr {
-          name: "allowed_devices"
-          type: "list(string)"
-          default_value {
-            list {
-            }
-          }
-        }
-        is_stateful: true
-      }
-    }
-    tags: "serve"
-    tensorflow_version: "1.15.0"
-    tensorflow_git_version: "unknown"
-    stripped_default_attrs: true
-  }
-  graph_def {
-    node {
-      name: "train_step"
-      op: "VarHandleOp"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-          }
-        }
-      }
-      attr {
-        key: "shared_name"
-        value {
-          s: "train_step"
-        }
-      }
-    }
-    node {
-      name: "train_step/Read/ReadVariableOp"
-      op: "ReadVariableOp"
-      input: "train_step"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-    }
-    node {
-      name: "QNetwork/EncodingNetwork/dense/kernel"
-      op: "VarHandleOp"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 34
-            }
-            dim {
-              size: 100
-            }
-          }
-        }
-      }
-      attr {
-        key: "shared_name"
-        value {
-          s: "QNetwork/EncodingNetwork/dense/kernel"
-        }
-      }
-    }
-    node {
-      name: "QNetwork/EncodingNetwork/dense/kernel/Read/ReadVariableOp"
-      op: "ReadVariableOp"
-      input: "QNetwork/EncodingNetwork/dense/kernel"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 34
-              }
-              dim {
-                size: 100
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-    }
-    node {
-      name: "QNetwork/EncodingNetwork/dense/bias"
-      op: "VarHandleOp"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 100
-            }
-          }
-        }
-      }
-      attr {
-        key: "shared_name"
-        value {
-          s: "QNetwork/EncodingNetwork/dense/bias"
-        }
-      }
-    }
-    node {
-      name: "QNetwork/EncodingNetwork/dense/bias/Read/ReadVariableOp"
-      op: "ReadVariableOp"
-      input: "QNetwork/EncodingNetwork/dense/bias"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 100
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-    }
-    node {
-      name: "QNetwork/EncodingNetwork/dense_1/kernel"
-      op: "VarHandleOp"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 100
-            }
-            dim {
-              size: 40
-            }
-          }
-        }
-      }
-      attr {
-        key: "shared_name"
-        value {
-          s: "QNetwork/EncodingNetwork/dense_1/kernel"
-        }
-      }
-    }
-    node {
-      name: "QNetwork/EncodingNetwork/dense_1/kernel/Read/ReadVariableOp"
-      op: "ReadVariableOp"
-      input: "QNetwork/EncodingNetwork/dense_1/kernel"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 100
-              }
-              dim {
-                size: 40
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-    }
-    node {
-      name: "QNetwork/EncodingNetwork/dense_1/bias"
-      op: "VarHandleOp"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 40
-            }
-          }
-        }
-      }
-      attr {
-        key: "shared_name"
-        value {
-          s: "QNetwork/EncodingNetwork/dense_1/bias"
-        }
-      }
-    }
-    node {
-      name: "QNetwork/EncodingNetwork/dense_1/bias/Read/ReadVariableOp"
-      op: "ReadVariableOp"
-      input: "QNetwork/EncodingNetwork/dense_1/bias"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 40
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-    }
-    node {
-      name: "QNetwork/dense_2/kernel"
-      op: "VarHandleOp"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 40
-            }
-            dim {
-              size: 2
-            }
-          }
-        }
-      }
-      attr {
-        key: "shared_name"
-        value {
-          s: "QNetwork/dense_2/kernel"
-        }
-      }
-    }
-    node {
-      name: "QNetwork/dense_2/kernel/Read/ReadVariableOp"
-      op: "ReadVariableOp"
-      input: "QNetwork/dense_2/kernel"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 40
-              }
-              dim {
-                size: 2
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-    }
-    node {
-      name: "QNetwork/dense_2/bias"
-      op: "VarHandleOp"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 2
-            }
-          }
-        }
-      }
-      attr {
-        key: "shared_name"
-        value {
-          s: "QNetwork/dense_2/bias"
-        }
-      }
-    }
-    node {
-      name: "QNetwork/dense_2/bias/Read/ReadVariableOp"
-      op: "ReadVariableOp"
-      input: "QNetwork/dense_2/bias"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 2
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-    }
-    node {
-      name: "NoOp"
-      op: "NoOp"
-    }
-    node {
-      name: "Const"
-      op: "Const"
-      device: "/device:CPU:0"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_STRING
-        }
-      }
-      attr {
-        key: "value"
-        value {
-          tensor {
-            dtype: DT_STRING
-            tensor_shape {
-            }
-            string_val: "\nu\n\023\010\001\022\017_time_step_spec\n\024\010\002\022\020_trajectory_spec\n\023\010\003\022\017_wrapped_policy\n\016\010\004\022\ntrain_step\n\023\010\005\022\017model_variables\n\016\010\006\022\nsignatures\n\030\n\017\010\007\022\013observation\n\005\010\007\022\0013\n\030\n\017\010\007\022\013observation\n\005\010\007\022\0011\n;\n\016\010\010\022\n_q_network\n\023\010\001\022\017_time_step_spec\n\024\010\t\022\020_trajectory_spec\nE\022C\n\016VARIABLE_VALUE\022\ntrain_step\032%train_step/.ATTRIBUTES/VARIABLE_VALUE\n*\n\005\010\n\022\0010\n\005\010\013\022\0011\n\005\010\014\022\0012\n\005\010\r\022\0013\n\005\010\016\022\0014\n\005\010\017\022\0015\n\000\n\000\n\214\001\n\026\010\020\022\022_input_tensor_spec\n\014\010\021\022\010_encoder\n\022\010\022\022\016_q_value_layer\n\r\010\023\022\tvariables\n\031\010\024\022\025regularization_losses\n\027\010\025\022\023trainable_variables\n\r\010\026\022\tkeras_api\n\030\n\017\010\007\022\013observation\n\005\010\007\022\0011\ng\022e\n\016VARIABLE_VALUE\022%QNetwork/EncodingNetwork/dense/kernel\032,model_variables/0/.ATTRIBUTES/VARIABLE_VALUE\ne\022c\n\016VARIABLE_VALUE\022#QNetwork/EncodingNetwork/dense/bias\032,model_variables/1/.ATTRIBUTES/VARIABLE_VALUE\ni\022g\n\016VARIABLE_VALUE\022\'QNetwork/EncodingNetwork/dense_1/kernel\032,model_variables/2/.ATTRIBUTES/VARIABLE_VALUE\ng\022e\n\016VARIABLE_VALUE\022%QNetwork/EncodingNetwork/dense_1/bias\032,model_variables/3/.ATTRIBUTES/VARIABLE_VALUE\nY\022W\n\016VARIABLE_VALUE\022\027QNetwork/dense_2/kernel\032,model_variables/4/.ATTRIBUTES/VARIABLE_VALUE\nW\022U\n\016VARIABLE_VALUE\022\025QNetwork/dense_2/bias\032,model_variables/5/.ATTRIBUTES/VARIABLE_VALUE\n\000\n\334\001\n\026\010\027\022\022_input_tensor_spec\n\027\010\030\022\023_preprocessing_nest\n\036\010\031\022\032_flat_preprocessing_layers\n\033\010\032\022\027_preprocessing_combiner\n\032\010\033\022\026_postprocessing_layers\n\r\010\034\022\tvariables\n\031\010\035\022\025regularization_losses\n\027\010\036\022\023trainable_variables\n\r\010\037\022\tkeras_api\nh\n\n\010\016\022\006kernel\n\010\010\017\022\004bias\n\r\010 
\022\tvariables\n\031\010!\022\025regularization_losses\n\027\010\"\022\023trainable_variables\n\r\010#\022\tkeras_api\n*\n\005\010\n\022\0010\n\005\010\013\022\0011\n\005\010\014\022\0012\n\005\010\r\022\0013\n\005\010\016\022\0014\n\005\010\017\022\0015\n\000\n*\n\005\010\n\022\0010\n\005\010\013\022\0011\n\005\010\014\022\0012\n\005\010\r\022\0013\n\005\010\016\022\0014\n\005\010\017\022\0015\n\255\001\n\021\010$\022\rlayer_metrics\n\r\010\023\022\tvariables\n\037\010%\022\033layer_regularization_losses\n\013\010&\022\007metrics\n\n\010\'\022\006layers\n\031\010\024\022\025regularization_losses\n\033\010(\022\027non_trainable_variables\n\027\010\025\022\023trainable_variables\n\000\n\000\nV\n\005\010)\022\0010\n\005\010*\022\0011\n\005\010+\022\0012\n\005\010,\022\0013\n\005\010-\022\0014\n\005\010.\022\0015\n\005\010/\022\0016\n\005\0100\022\0017\n\005\0101\022\0018\n\005\0102\022\0019\n\006\0103\022\00210\n\006\0104\022\00211\nR\n\r\0105\022\tvariables\n\031\0106\022\025regularization_losses\n\027\0107\022\023trainable_variables\n\r\0108\022\tkeras_api\n\025\n\005\0109\022\0010\n\005\010:\022\0011\n\005\010;\022\0012\n\034\n\005\010\n\022\0010\n\005\010\013\022\0011\n\005\010\014\022\0012\n\005\010\r\022\0013\n\000\n\034\n\005\010\n\022\0010\n\005\010\013\022\0011\n\005\010\014\022\0012\n\005\010\r\022\0013\n\255\001\n\021\010<\022\rlayer_metrics\n\r\010\034\022\tvariables\n\037\010=\022\033layer_regularization_losses\n\013\010>\022\007metrics\n\n\010?\022\006layers\n\031\010\035\022\025regularization_losses\n\033\010@\022\027non_trainable_variables\n\027\010\036\022\023trainable_variables\n\016\n\005\010\016\022\0010\n\005\010\017\022\0011\n\000\n\016\n\005\010\016\022\0010\n\005\010\017\022\0011\n\255\001\n\021\010A\022\rlayer_metrics\n\r\010 
\022\tvariables\n\037\010B\022\033layer_regularization_losses\n\013\010C\022\007metrics\n\n\010D\022\006layers\n\031\010!\022\025regularization_losses\n\033\010E\022\027non_trainable_variables\n\027\010\"\022\023trainable_variables\n\000\n\000\n\000\n\016\n\005\010\021\022\0010\n\005\010\022\022\0011\n\000\nR\n\r\010F\022\tvariables\n\031\010G\022\025regularization_losses\n\027\010H\022\023trainable_variables\n\r\010I\022\tkeras_api\nR\n\r\010J\022\tvariables\n\031\010K\022\025regularization_losses\n\027\010L\022\023trainable_variables\n\r\010M\022\tkeras_api\nR\n\r\010N\022\tvariables\n\031\010O\022\025regularization_losses\n\027\010P\022\023trainable_variables\n\r\010Q\022\tkeras_api\nR\n\r\010R\022\tvariables\n\031\010S\022\025regularization_losses\n\027\010T\022\023trainable_variables\n\r\010U\022\tkeras_api\nR\n\r\010V\022\tvariables\n\031\010W\022\025regularization_losses\n\027\010X\022\023trainable_variables\n\r\010Y\022\tkeras_api\nR\n\r\010Z\022\tvariables\n\031\010[\022\025regularization_losses\n\027\010\\\022\023trainable_variables\n\r\010]\022\tkeras_api\nR\n\r\010^\022\tvariables\n\031\010_\022\025regularization_losses\n\027\010`\022\023trainable_variables\n\r\010a\022\tkeras_api\nR\n\r\010b\022\tvariables\n\031\010c\022\025regularization_losses\n\027\010d\022\023trainable_variables\n\r\010e\022\tkeras_api\nR\n\r\010f\022\tvariables\n\031\010g\022\025regularization_losses\n\027\010h\022\023trainable_variables\n\r\010i\022\tkeras_api\nR\n\r\010j\022\tvariables\n\031\010k\022\025regularization_losses\n\027\010l\022\023trainable_variables\n\r\010m\022\tkeras_api\nR\n\r\010n\022\tvariables\n\031\010o\022\025regularization_losses\n\027\010p\022\023trainable_variables\n\r\010q\022\tkeras_api\nR\n\r\010r\022\tvariables\n\031\010s\022\025regularization_losses\n\027\010t\022\023trainable_variables\n\r\010u\022\tkeras_api\n\000\n\000\n\000\n\255\001\n\021\010v\022\rlayer_metrics\n\r\0105\022\tvariables\n\037\010w\022\033layer_regularization_losses\n\013\010x\022\007metrics\n\n\010y\022\006layers\n\031\0106\022\025regularization_losses\n\033\010z\022\027non_trainable_variables\n\027\0107\022\023trainable_variables\nR\n\r\010{\022\tvariables\n\031\010|\022\025regularization_losses\n\027\010}\022\023trainable_variables\n\r\010~\022\tkeras_api\nk\n\n\010\n\022\006kernel\n\010\010\013\022\004bias\n\r\010\177\022\tvariables\n\032\010\200\001\022\025regularization_losses\n\030\010\201\001\022\023trainable_variables\n\016\010\202\001\022\tkeras_api\nl\n\n\010\014\022\006kernel\n\010\010\r\022\004bias\n\016\010\203\001\022\tvariables\n\032\010\204\001\022\025regularization_losses\n\030\010\205\001\022\023trainable_variables\n\016\010\206\001\022\tkeras_api\n\000\n\000\n\000\nv\n\005\010)\022\0010\n\005\010*\022\0011\n\005\010+\022\0012\n\005\010,\022\0013\n\005\010-\022\0014\n\005\010.\022\0015\n\005\010/\022\0016\n\005\0100\022\0017\n\005\0101\022\0018\n\005\0102\022\0019\n\006\0103\022\00210\n\006\0104\022\00211\n\006\010\032\022\00212\n\006\0109\022\00213\n\006\010:\022\00214\n\006\010;\022\00215\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\262\001\n\022\010\207\001\022\rlayer_metrics\n\r\010F\022\tvariables\n \010\210\001\022\033layer_regularization_losses\n\014\010\211\001\022\007metrics\n\013\010\212\001\022\006layers\n\031\010G\022\025regularization_losses\n\034\010\213\001\022\027non_trainable_variables\n\027\010H\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\214\001\022\rlayer_metrics\n\r\010J\022\tvariables\n 
\010\215\001\022\033layer_regularization_losses\n\014\010\216\001\022\007metrics\n\013\010\217\001\022\006layers\n\031\010K\022\025regularization_losses\n\034\010\220\001\022\027non_trainable_variables\n\027\010L\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\221\001\022\rlayer_metrics\n\r\010N\022\tvariables\n \010\222\001\022\033layer_regularization_losses\n\014\010\223\001\022\007metrics\n\013\010\224\001\022\006layers\n\031\010O\022\025regularization_losses\n\034\010\225\001\022\027non_trainable_variables\n\027\010P\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\226\001\022\rlayer_metrics\n\r\010R\022\tvariables\n \010\227\001\022\033layer_regularization_losses\n\014\010\230\001\022\007metrics\n\013\010\231\001\022\006layers\n\031\010S\022\025regularization_losses\n\034\010\232\001\022\027non_trainable_variables\n\027\010T\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\233\001\022\rlayer_metrics\n\r\010V\022\tvariables\n \010\234\001\022\033layer_regularization_losses\n\014\010\235\001\022\007metrics\n\013\010\236\001\022\006layers\n\031\010W\022\025regularization_losses\n\034\010\237\001\022\027non_trainable_variables\n\027\010X\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\240\001\022\rlayer_metrics\n\r\010Z\022\tvariables\n \010\241\001\022\033layer_regularization_losses\n\014\010\242\001\022\007metrics\n\013\010\243\001\022\006layers\n\031\010[\022\025regularization_losses\n\034\010\244\001\022\027non_trainable_variables\n\027\010\\\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\245\001\022\rlayer_metrics\n\r\010^\022\tvariables\n \010\246\001\022\033layer_regularization_losses\n\014\010\247\001\022\007metrics\n\013\010\250\001\022\006layers\n\031\010_\022\025regularization_losses\n\034\010\251\001\022\027non_trainable_variables\n\027\010`\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\252\001\022\rlayer_metrics\n\r\010b\022\tvariables\n \010\253\001\022\033layer_regularization_losses\n\014\010\254\001\022\007metrics\n\013\010\255\001\022\006layers\n\031\010c\022\025regularization_losses\n\034\010\256\001\022\027non_trainable_variables\n\027\010d\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\257\001\022\rlayer_metrics\n\r\010f\022\tvariables\n \010\260\001\022\033layer_regularization_losses\n\014\010\261\001\022\007metrics\n\013\010\262\001\022\006layers\n\031\010g\022\025regularization_losses\n\034\010\263\001\022\027non_trainable_variables\n\027\010h\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\264\001\022\rlayer_metrics\n\r\010j\022\tvariables\n \010\265\001\022\033layer_regularization_losses\n\014\010\266\001\022\007metrics\n\013\010\267\001\022\006layers\n\031\010k\022\025regularization_losses\n\034\010\270\001\022\027non_trainable_variables\n\027\010l\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\271\001\022\rlayer_metrics\n\r\010n\022\tvariables\n \010\272\001\022\033layer_regularization_losses\n\014\010\273\001\022\007metrics\n\013\010\274\001\022\006layers\n\031\010o\022\025regularization_losses\n\034\010\275\001\022\027non_trainable_variables\n\027\010p\022\023trainable_variables\n\000\n\000\n\000\n\262\001\n\022\010\276\001\022\rlayer_metrics\n\r\010r\022\tvariables\n 
\010\277\001\022\033layer_regularization_losses\n\014\010\300\001\022\007metrics\n\013\010\301\001\022\006layers\n\031\010s\022\025regularization_losses\n\034\010\302\001\022\027non_trainable_variables\n\027\010t\022\023trainable_variables\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\262\001\n\022\010\303\001\022\rlayer_metrics\n\r\010{\022\tvariables\n \010\304\001\022\033layer_regularization_losses\n\014\010\305\001\022\007metrics\n\013\010\306\001\022\006layers\n\031\010|\022\025regularization_losses\n\034\010\307\001\022\027non_trainable_variables\n\027\010}\022\023trainable_variables\n\016\n\005\010\n\022\0010\n\005\010\013\022\0011\n\000\n\016\n\005\010\n\022\0010\n\005\010\013\022\0011\n\264\001\n\022\010\310\001\022\rlayer_metrics\n\r\010\177\022\tvariables\n \010\311\001\022\033layer_regularization_losses\n\014\010\312\001\022\007metrics\n\013\010\313\001\022\006layers\n\032\010\200\001\022\025regularization_losses\n\034\010\314\001\022\027non_trainable_variables\n\030\010\201\001\022\023trainable_variables\n\016\n\005\010\014\022\0010\n\005\010\r\022\0011\n\000\n\016\n\005\010\014\022\0010\n\005\010\r\022\0011\n\265\001\n\022\010\315\001\022\rlayer_metrics\n\016\010\203\001\022\tvariables\n \010\316\001\022\033layer_regularization_losses\n\014\010\317\001\022\007metrics\n\013\010\320\001\022\006layers\n\032\010\204\001\022\025regularization_losses\n\034\010\321\001\022\027non_trainable_variables\n\030\010\205\001\022\023trainable_variables\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000"
-          }
-        }
-      }
-    }
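(The long escaped string closed out above is not corruption: it appears to be the serialized object-dependency graph that tf.saved_model.save stores alongside the checkpoint, listing each tracked object's children — "variables", "layers", "trainable_variables", and so on — by node id. A minimal Python decoding sketch, assuming the unescaped bytes of that string have been extracted into raw_bytes and that the payload is a TrackableObjectGraph; the proto name is an inference from the field layout, not something this diff states:

    # Sketch: decode the escaped blob above. `raw_bytes` and the
    # TrackableObjectGraph payload type are assumptions.
    from tensorflow.core.protobuf.trackable_object_graph_pb2 import (
        TrackableObjectGraph,
    )

    graph = TrackableObjectGraph.FromString(raw_bytes)
    for node_id, obj in enumerate(graph.nodes):
        for child in obj.children:
            # Each reference pairs a target node id with a local name such
            # as "variables" or "regularization_losses".
            print(node_id, "->", child.node_id, child.local_name)
)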
-    node {
-      name: "action_callee_basic_block_count"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_callee_conditionally_executed_blocks"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_callee_users"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_caller_basic_block_count"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_caller_conditionally_executed_blocks"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_caller_users"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_callsite_height"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_cost_estimate"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_discount"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_edge_count"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_inlining_default"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_node_count"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_nr_ctant_params"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT64
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_reward"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_FLOAT
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "action_step_type"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_INT32
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-    }
-    node {
-      name: "StatefulPartitionedCall"
-      op: "StatefulPartitionedCall"
-      input: "action_callee_basic_block_count"
-      input: "action_callee_conditionally_executed_blocks"
-      input: "action_callee_users"
-      input: "action_caller_basic_block_count"
-      input: "action_caller_conditionally_executed_blocks"
-      input: "action_caller_users"
-      input: "action_callsite_height"
-      input: "action_cost_estimate"
-      input: "action_discount"
-      input: "action_edge_count"
-      input: "action_inlining_default"
-      input: "action_node_count"
-      input: "action_nr_ctant_params"
-      input: "action_reward"
-      input: "action_step_type"
-      input: "QNetwork/EncodingNetwork/dense/kernel"
-      input: "QNetwork/EncodingNetwork/dense/bias"
-      input: "QNetwork/EncodingNetwork/dense_1/kernel"
-      input: "QNetwork/EncodingNetwork/dense_1/bias"
-      input: "QNetwork/dense_2/kernel"
-      input: "QNetwork/dense_2/bias"
-      attr {
-        key: "Tin"
-        value {
-          list {
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_FLOAT
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_INT64
-            type: DT_FLOAT
-            type: DT_INT32
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-          }
-        }
-      }
-      attr {
-        key: "Tout"
-        value {
-          list {
-            type: DT_INT64
-          }
-        }
-      }
-      attr {
-        key: "_collective_manager_ids"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-              dim {
-                size: 1
-              }
-            }
-          }
-        }
-      }
-      attr {
-        key: "_read_only_resource_inputs"
-        value {
-          list {
-            i: 15
-            i: 16
-            i: 17
-            i: 18
-            i: 19
-            i: 20
-          }
-        }
-      }
-      attr {
-        key: "config_proto"
-        value {
-          s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-        }
-      }
-      attr {
-        key: "f"
-        value {
-          func {
-            name: "__inference_signature_wrapper_4619026"
-          }
-        }
-      }
-    }
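(Taken together, the placeholders above and this StatefulPartitionedCall wire up the model's serving entry point: fifteen feature tensors, all shape [1] — int64 except the float discount/reward and the int32 step_type — plus six variable resources, returning a single int64 decision. A minimal invocation sketch, assuming the exported signature is named "action" and its keyword names drop the "action_" placeholder prefix; both are inferences from the names above, not stated in this diff:

    # Sketch: call this SavedModel's serving signature. The path, the
    # "action" signature name, and the stripped keyword names are assumed.
    import tensorflow as tf

    policy = tf.saved_model.load("saved_policy")
    action = policy.signatures["action"]
    decision = action(
        callee_basic_block_count=tf.constant([10], tf.int64),
        callee_conditionally_executed_blocks=tf.constant([2], tf.int64),
        callee_users=tf.constant([1], tf.int64),
        caller_basic_block_count=tf.constant([40], tf.int64),
        caller_conditionally_executed_blocks=tf.constant([8], tf.int64),
        caller_users=tf.constant([3], tf.int64),
        callsite_height=tf.constant([4], tf.int64),
        cost_estimate=tf.constant([120], tf.int64),
        discount=tf.constant([1.0], tf.float32),
        edge_count=tf.constant([60], tf.int64),
        inlining_default=tf.constant([0], tf.int64),
        node_count=tf.constant([50], tf.int64),
        nr_ctant_params=tf.constant([0], tf.int64),
        reward=tf.constant([0.0], tf.float32),
        step_type=tf.constant([0], tf.int32),
    )
    # Tout is a single DT_INT64 tensor: the inlining decision.
)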
-    node {
-      name: "PartitionedCall"
-      op: "PartitionedCall"
-      attr {
-        key: "Tin"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "Tout"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "_collective_manager_ids"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "_read_only_resource_inputs"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "config_proto"
-        value {
-          s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-        }
-      }
-      attr {
-        key: "f"
-        value {
-          func {
-            name: "__inference_signature_wrapper_4619033"
-          }
-        }
-      }
-    }
-    node {
-      name: "StatefulPartitionedCall_1"
-      op: "StatefulPartitionedCall"
-      input: "train_step"
-      attr {
-        key: "Tin"
-        value {
-          list {
-            type: DT_RESOURCE
-          }
-        }
-      }
-      attr {
-        key: "Tout"
-        value {
-          list {
-            type: DT_INT64
-          }
-        }
-      }
-      attr {
-        key: "_collective_manager_ids"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "_read_only_resource_inputs"
-        value {
-          list {
-            i: 0
-          }
-        }
-      }
-      attr {
-        key: "config_proto"
-        value {
-          s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-        }
-      }
-      attr {
-        key: "f"
-        value {
-          func {
-            name: "__inference_signature_wrapper_4619048"
-          }
-        }
-      }
-    }
-    node {
-      name: "saver_filename"
-      op: "Placeholder"
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "dtype"
-        value {
-          type: DT_STRING
-        }
-      }
-      attr {
-        key: "shape"
-        value {
-          shape {
-          }
-        }
-      }
-    }
-    node {
-      name: "StatefulPartitionedCall_2"
-      op: "StatefulPartitionedCall"
-      input: "saver_filename"
-      input: "train_step/Read/ReadVariableOp"
-      input: "QNetwork/EncodingNetwork/dense/kernel/Read/ReadVariableOp"
-      input: "QNetwork/EncodingNetwork/dense/bias/Read/ReadVariableOp"
-      input: "QNetwork/EncodingNetwork/dense_1/kernel/Read/ReadVariableOp"
-      input: "QNetwork/EncodingNetwork/dense_1/bias/Read/ReadVariableOp"
-      input: "QNetwork/dense_2/kernel/Read/ReadVariableOp"
-      input: "QNetwork/dense_2/bias/Read/ReadVariableOp"
-      input: "Const"
-      attr {
-        key: "Tin"
-        value {
-          list {
-            type: DT_STRING
-            type: DT_INT64
-            type: DT_FLOAT
-            type: DT_FLOAT
-            type: DT_FLOAT
-            type: DT_FLOAT
-            type: DT_FLOAT
-            type: DT_FLOAT
-            type: DT_STRING
-          }
-        }
-      }
-      attr {
-        key: "Tout"
-        value {
-          list {
-            type: DT_STRING
-          }
-        }
-      }
-      attr {
-        key: "_collective_manager_ids"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "_read_only_resource_inputs"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "config_proto"
-        value {
-          s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-        }
-      }
-      attr {
-        key: "f"
-        value {
-          func {
-            name: "__inference__traced_save_4619143"
-          }
-        }
-      }
-    }
-    node {
-      name: "StatefulPartitionedCall_3"
-      op: "StatefulPartitionedCall"
-      input: "saver_filename"
-      input: "train_step"
-      input: "QNetwork/EncodingNetwork/dense/kernel"
-      input: "QNetwork/EncodingNetwork/dense/bias"
-      input: "QNetwork/EncodingNetwork/dense_1/kernel"
-      input: "QNetwork/EncodingNetwork/dense_1/bias"
-      input: "QNetwork/dense_2/kernel"
-      input: "QNetwork/dense_2/bias"
-      attr {
-        key: "Tin"
-        value {
-          list {
-            type: DT_STRING
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-            type: DT_RESOURCE
-          }
-        }
-      }
-      attr {
-        key: "Tout"
-        value {
-          list {
-            type: DT_STRING
-          }
-        }
-      }
-      attr {
-        key: "_collective_manager_ids"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "_output_shapes"
-        value {
-          list {
-            shape {
-            }
-          }
-        }
-      }
-      attr {
-        key: "_read_only_resource_inputs"
-        value {
-          list {
-          }
-        }
-      }
-      attr {
-        key: "config_proto"
-        value {
-          s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-        }
-      }
-      attr {
-        key: "f"
-        value {
-          func {
-            name: "__inference__traced_restore_4619176"
-          }
-        }
-      }
-    }
-    library {
-      function {
-        signature {
-          name: "__inference_signature_wrapper_4619048"
-          input_arg {
-            name: "unknown"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-          control_output: "StatefulPartitionedCall"
-        }
-        node_def {
-          name: "StatefulPartitionedCall"
-          op: "StatefulPartitionedCall"
-          input: "unknown"
-          attr {
-            key: "Tin"
-            value {
-              list {
-                type: DT_RESOURCE
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-                type: DT_INT64
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-                i: 0
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_function_with_signature_4619040"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StatefulPartitionedCall"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "StatefulPartitionedCall:output:0"
-          input: "^StatefulPartitionedCall"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "StatefulPartitionedCall"
-          value: "StatefulPartitionedCall"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_function_with_signature_4619029"
-        }
-        node_def {
-          name: "PartitionedCall"
-          op: "PartitionedCall"
-          attr {
-            key: "Tin"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_function_722"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "PartitionedCall"
-          }
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_action_931"
-          input_arg {
-            name: "time_step"
-            type: DT_INT32
-          }
-          input_arg {
-            name: "time_step_1"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "time_step_2"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "time_step_3"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_4"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_5"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_6"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_7"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_8"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_9"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_10"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_11"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_12"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_13"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_14"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "qnetwork_encodingnetwork_dense_matmul_readvariableop_resource"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "qnetwork_encodingnetwork_dense_biasadd_readvariableop_resource"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "qnetwork_encodingnetwork_dense_1_matmul_readvariableop_resource"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "qnetwork_encodingnetwork_dense_1_biasadd_readvariableop_resource"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "qnetwork_dense_2_matmul_readvariableop_resource"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "qnetwork_dense_2_biasadd_readvariableop_resource"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_3"
-          input: "QNetwork/EncodingNetwork/lambda/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 10
-                f: 10
-                f: 11
-                f: 12
-                f: 13
-                f: 14
-                f: 14
-                f: 14
-                f: 16
-                f: 17
-                f: 19
-                f: 23
-                f: 27
-                f: 39
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda/concat"
-          }
-        }
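(The six node_defs above, ExpandDims through ConcatV2, form one per-feature preprocessing lambda: the raw int64 feature is expanded to shape [1, 1], bucketized against the quantile boundaries — which in this generated model collapse into long runs of repeated values — rescaled into [0, 1] by dividing the bucket index by 999, and fanned out into the three channels [v, sqrt(v), v*v]. A functional sketch of the same computation; the function and argument names are illustrative, not from the model:

    # Sketch of the lambda sub-graph above; "preprocess_feature" is an
    # illustrative name.
    import tensorflow as tf

    def preprocess_feature(x, boundaries):
        x = tf.expand_dims(x, axis=-1)                 # ExpandDims(dim=-1)
        b = tf.raw_ops.Bucketize(input=x, boundaries=boundaries)
        v = tf.cast(b, tf.float32) / 999.0             # Cast, RealDiv(y=999)
        return tf.concat([v, tf.sqrt(v), v * v], axis=-1)  # Sqrt, Mul, ConcatV2

Each later lambda_N block repeats this pattern with its own boundary list, so the encoding network sees three derived channels per scalar feature.)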
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_4"
-          input: "QNetwork/EncodingNetwork/lambda_1/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_1/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 3
-                f: 3
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 7
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 9
-                f: 10
-                f: 10
-                f: 10
-                f: 12
-                f: 12
-                f: 12
-                f: 14
-                f: 14
-                f: 18
-                f: 20
-                f: 23
-                f: 30
-                f: 41
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_1/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_1/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_1/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_1/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_1/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_1/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_1/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_1/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_1/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_1/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_1/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_1/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_5"
-          input: "QNetwork/EncodingNetwork/lambda_2/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_2/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 23
-                f: 23
-                f: 23
-                f: 24
-                f: 24
-                f: 24
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 26
-                f: 26
-                f: 26
-                f: 27
-                f: 27
-                f: 27
-                f: 27
-                f: 28
-                f: 28
-                f: 29
-                f: 29
-                f: 29
-                f: 29
-                f: 30
-                f: 30
-                f: 31
-                f: 31
-                f: 31
-                f: 31
-                f: 32
-                f: 32
-                f: 33
-                f: 33
-                f: 33
-                f: 34
-                f: 34
-                f: 34
-                f: 34
-                f: 35
-                f: 35
-                f: 36
-                f: 36
-                f: 37
-                f: 37
-                f: 37
-                f: 38
-                f: 38
-                f: 39
-                f: 39
-                f: 40
-                f: 40
-                f: 41
-                f: 41
-                f: 41
-                f: 42
-                f: 43
-                f: 43
-                f: 44
-                f: 44
-                f: 45
-                f: 45
-                f: 46
-                f: 46
-                f: 46
-                f: 47
-                f: 47
-                f: 48
-                f: 49
-                f: 49
-                f: 50
-                f: 50
-                f: 51
-                f: 52
-                f: 53
-                f: 53
-                f: 54
-                f: 55
-                f: 56
-                f: 57
-                f: 57
-                f: 58
-                f: 59
-                f: 60
-                f: 61
-                f: 61
-                f: 63
-                f: 63
-                f: 64
-                f: 65
-                f: 66
-                f: 67
-                f: 67
-                f: 69
-                f: 70
-                f: 71
-                f: 72
-                f: 73
-                f: 74
-                f: 75
-                f: 77
-                f: 78
-                f: 79
-                f: 80
-                f: 81
-                f: 82
-                f: 83
-                f: 85
-                f: 86
-                f: 88
-                f: 89
-                f: 91
-                f: 92
-                f: 94
-                f: 96
-                f: 97
-                f: 99
-                f: 100
-                f: 101
-                f: 103
-                f: 105
-                f: 107
-                f: 109
-                f: 111
-                f: 113
-                f: 115
-                f: 118
-                f: 121
-                f: 123
-                f: 126
-                f: 128
-                f: 130
-                f: 133
-                f: 135
-                f: 137
-                f: 140
-                f: 143
-                f: 146
-                f: 148
-                f: 151
-                f: 154
-                f: 157
-                f: 161
-                f: 163
-                f: 166
-                f: 169
-                f: 173
-                f: 178
-                f: 183
-                f: 189
-                f: 193
-                f: 197
-                f: 202
-                f: 208
-                f: 213
-                f: 218
-                f: 223
-                f: 228
-                f: 233
-                f: 239
-                f: 245
-                f: 250
-                f: 257
-                f: 262
-                f: 269
-                f: 277
-                f: 284
-                f: 292
-                f: 300
-                f: 308
-                f: 319
-                f: 329
-                f: 340
-                f: 349
-                f: 359
-                f: 371
-                f: 382
-                f: 394
-                f: 410
-                f: 423
-                f: 435
-                f: 445
-                f: 462
-                f: 480
-                f: 492
-                f: 506
-                f: 519
-                f: 536
-                f: 557
-                f: 577
-                f: 598
-                f: 622
-                f: 655
-                f: 679
-                f: 707
-                f: 733
-                f: 751
-                f: 787
-                f: 814
-                f: 847
-                f: 897
-                f: 934
-                f: 997
-                f: 1062
-                f: 1111
-                f: 1181
-                f: 1275
-                f: 1385
-                f: 1465
-                f: 1603
-                f: 1769
-                f: 2057
-                f: 2257
-                f: 2803
-                f: 3468
-                f: 4417
-                f: 6538
-                f: 16126
-                f: 23446
-                f: 33536
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_2/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_2/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_2/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_2/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_2/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_2/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_2/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_2/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_2/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_2/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_2/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_2/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_6"
-          input: "QNetwork/EncodingNetwork/lambda_3/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_3/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 23
-                f: 23
-                f: 23
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 27
-                f: 27
-                f: 27
-                f: 27
-                f: 27
-                f: 28
-                f: 28
-                f: 28
-                f: 29
-                f: 29
-                f: 29
-                f: 29
-                f: 30
-                f: 30
-                f: 30
-                f: 31
-                f: 31
-                f: 31
-                f: 32
-                f: 32
-                f: 32
-                f: 33
-                f: 33
-                f: 33
-                f: 34
-                f: 34
-                f: 34
-                f: 34
-                f: 35
-                f: 35
-                f: 35
-                f: 36
-                f: 36
-                f: 36
-                f: 37
-                f: 37
-                f: 37
-                f: 38
-                f: 38
-                f: 38
-                f: 38
-                f: 39
-                f: 39
-                f: 40
-                f: 40
-                f: 41
-                f: 41
-                f: 42
-                f: 43
-                f: 43
-                f: 44
-                f: 45
-                f: 45
-                f: 46
-                f: 47
-                f: 47
-                f: 48
-                f: 49
-                f: 49
-                f: 50
-                f: 50
-                f: 52
-                f: 52
-                f: 53
-                f: 54
-                f: 55
-                f: 55
-                f: 57
-                f: 58
-                f: 59
-                f: 60
-                f: 62
-                f: 64
-                f: 65
-                f: 66
-                f: 68
-                f: 70
-                f: 70
-                f: 70
-                f: 70
-                f: 70
-                f: 71
-                f: 73
-                f: 75
-                f: 76
-                f: 78
-                f: 81
-                f: 84
-                f: 86
-                f: 90
-                f: 94
-                f: 98
-                f: 101
-                f: 106
-                f: 111
-                f: 117
-                f: 123
-                f: 130
-                f: 138
-                f: 146
-                f: 157
-                f: 163
-                f: 176
-                f: 187
-                f: 198
-                f: 214
-                f: 227
-                f: 252
-                f: 280
-                f: 327
-                f: 395
-                f: 506
-                f: 671
-                f: 1025
-                f: 1971
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_3/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_3/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_3/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_3/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_3/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_3/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_3/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_3/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_3/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_3/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_3/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_3/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_7"
-          input: "QNetwork/EncodingNetwork/lambda_4/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_4/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 5
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 7
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 11
-                f: 11
-                f: 11
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 13
-                f: 13
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 19
-                f: 19
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 21
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 25
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 27
-                f: 28
-                f: 28
-                f: 28
-                f: 28
-                f: 28
-                f: 29
-                f: 30
-                f: 30
-                f: 30
-                f: 30
-                f: 30
-                f: 30
-                f: 31
-                f: 32
-                f: 32
-                f: 32
-                f: 32
-                f: 32
-                f: 34
-                f: 34
-                f: 34
-                f: 34
-                f: 34
-                f: 34
-                f: 35
-                f: 36
-                f: 36
-                f: 36
-                f: 37
-                f: 38
-                f: 38
-                f: 38
-                f: 39
-                f: 40
-                f: 40
-                f: 41
-                f: 42
-                f: 42
-                f: 43
-                f: 44
-                f: 44
-                f: 46
-                f: 46
-                f: 47
-                f: 48
-                f: 48
-                f: 50
-                f: 50
-                f: 52
-                f: 52
-                f: 54
-                f: 55
-                f: 55
-                f: 56
-                f: 57
-                f: 58
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 62
-                f: 62
-                f: 64
-                f: 65
-                f: 66
-                f: 68
-                f: 70
-                f: 72
-                f: 74
-                f: 77
-                f: 80
-                f: 82
-                f: 86
-                f: 89
-                f: 92
-                f: 96
-                f: 99
-                f: 104
-                f: 108
-                f: 114
-                f: 119
-                f: 125
-                f: 131
-                f: 139
-                f: 146
-                f: 157
-                f: 167
-                f: 176
-                f: 188
-                f: 198
-                f: 215
-                f: 236
-                f: 262
-                f: 306
-                f: 376
-                f: 462
-                f: 596
-                f: 942
-                f: 1428
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_4/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_4/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_4/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_4/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_4/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_4/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_4/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_4/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_4/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_4/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_4/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_4/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_8"
-          input: "QNetwork/EncodingNetwork/lambda_5/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_5/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 10
-                f: 10
-                f: 11
-                f: 11
-                f: 12
-                f: 13
-                f: 14
-                f: 15
-                f: 16
-                f: 18
-                f: 20
-                f: 23
-                f: 29
-                f: 38
-                f: 60
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_5/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_5/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_5/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_5/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_5/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_5/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_5/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_5/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_5/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_5/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_5/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_5/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_9"
-          input: "QNetwork/EncodingNetwork/lambda_6/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_6/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 3
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 4
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 6
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 7
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 8
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 9
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 11
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 12
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 13
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 14
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 16
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 17
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 18
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 19
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 21
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 22
-                f: 23
-                f: 23
-                f: 23
-                f: 23
-                f: 23
-                f: 23
-                f: 23
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 24
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 26
-                f: 26
-                f: 26
-                f: 26
-                f: 27
-                f: 27
-                f: 27
-                f: 28
-                f: 28
-                f: 28
-                f: 29
-                f: 29
-                f: 30
-                f: 30
-                f: 30
-                f: 31
-                f: 31
-                f: 32
-                f: 32
-                f: 33
-                f: 33
-                f: 34
-                f: 35
-                f: 37
-                f: 38
-                f: 40
-                f: 46
-                f: 51
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_6/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_6/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_6/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_6/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_6/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_6/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_6/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_6/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_6/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_6/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_6/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_6/concat"
-          }
-        }
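The run of lambda_6 node_defs deleted above forms one self-contained feature-preprocessing chain: ExpandDims reshapes the scalar int64 feature to [1, 1], Bucketize maps it to a bucket index against the boundaries list, Cast plus RealDiv divide that index by the constant 999 from truediv/y (apparently normalizing it to roughly [0, 1]), and ConcatV2 with N: 3 joins the normalized value, its square root (Sqrt), and its square (Mul of truediv with itself) along the last axis. A minimal Python/TensorFlow sketch of the same chain, under the assumption that `boundaries` is the float list pulled from the Bucketize attr (the function name and argument are hypothetical, for illustration only):

    import tensorflow as tf

    def encode_feature(x, boundaries):
        # Sketch only. x: int64 tensor of shape [1]; boundaries: the float
        # cut points from the "boundaries" attr of the Bucketize node_def.
        x = tf.expand_dims(x, axis=-1)                    # ExpandDims, dim: -1
        idx = tf.raw_ops.Bucketize(input=x, boundaries=boundaries)
        z = tf.cast(idx, tf.float32) / 999.0              # Cast + RealDiv (truediv/y: 999)
        # ConcatV2 of N: 3 inputs along axis -1 -> shape [1, 3]
        return tf.concat([z, tf.sqrt(z), z * z], axis=-1)

The same ExpandDims/Bucketize/truediv/Sqrt/mul/concat pattern repeats below for lambda_7 and lambda_8, differing only in the boundaries each feature is quantized against.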
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_10"
-          input: "QNetwork/EncodingNetwork/lambda_7/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_7/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: -15035
-                f: -15030
-                f: -15025
-                f: -15000
-                f: -14985
-                f: -14945
-                f: -14745
-                f: -70
-                f: -55
-                f: -55
-                f: -50
-                f: -50
-                f: -50
-                f: -45
-                f: -45
-                f: -45
-                f: -45
-                f: -45
-                f: -45
-                f: -45
-                f: -45
-                f: -45
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -40
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -35
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -30
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -25
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -20
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -15
-                f: -10
-                f: -10
-                f: -10
-                f: -10
-                f: -10
-                f: -10
-                f: -10
-                f: -10
-                f: -10
-                f: -10
-                f: -10
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: -5
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 5
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 10
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 15
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 20
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 25
-                f: 30
-                f: 30
-                f: 30
-                f: 30
-                f: 30
-                f: 30
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 35
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 40
-                f: 45
-                f: 45
-                f: 45
-                f: 45
-                f: 45
-                f: 45
-                f: 45
-                f: 45
-                f: 45
-                f: 45
-                f: 50
-                f: 50
-                f: 50
-                f: 50
-                f: 50
-                f: 50
-                f: 50
-                f: 50
-                f: 50
-                f: 55
-                f: 55
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 60
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 65
-                f: 70
-                f: 70
-                f: 70
-                f: 70
-                f: 70
-                f: 70
-                f: 70
-                f: 75
-                f: 75
-                f: 80
-                f: 80
-                f: 80
-                f: 85
-                f: 85
-                f: 85
-                f: 90
-                f: 90
-                f: 90
-                f: 90
-                f: 95
-                f: 95
-                f: 100
-                f: 100
-                f: 105
-                f: 110
-                f: 115
-                f: 120
-                f: 125
-                f: 125
-                f: 130
-                f: 140
-                f: 140
-                f: 145
-                f: 150
-                f: 155
-                f: 160
-                f: 160
-                f: 165
-                f: 170
-                f: 175
-                f: 180
-                f: 190
-                f: 200
-                f: 210
-                f: 215
-                f: 220
-                f: 220
-                f: 230
-                f: 235
-                f: 245
-                f: 250
-                f: 260
-                f: 275
-                f: 290
-                f: 305
-                f: 325
-                f: 350
-                f: 370
-                f: 390
-                f: 425
-                f: 460
-                f: 500
-                f: 560
-                f: 650
-                f: 790
-                f: 1025
-                f: 1600
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/Bucketize"
-          }
-        }
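For reference on how the bucket index is produced: Bucketize returns, for each input, the index of the first boundary strictly greater than it, so the 999 cut points in the lambda_7 list above yield indices 0 through 999 (which is why truediv divides by 999), and consecutive repeated boundary values, as in the long runs above, simply create buckets no input can fall into. A rough pure-Python equivalent using bisect; the tie-breaking shown is my reading of the op's documented behavior, not something recoverable from this graph:

    import bisect

    def bucketize(value, boundaries):
        # Index of the first boundary > value; ties go to the right bucket,
        # e.g. with boundaries [0.0, 10.0, 100.0]: -5 -> 0, 10 -> 2, 150 -> 3.
        return bisect.bisect_right(boundaries, value)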
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_7/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_7/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_7/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_7/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_7/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_7/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_7/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_7/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_7/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_7/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_7/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_7/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_11"
-          input: "QNetwork/EncodingNetwork/lambda_8/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_8/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 18
-                f: 29
-                f: 39
-                f: 48
-                f: 57
-                f: 64
-                f: 70
-                f: 76
-                f: 82
-                f: 87
-                f: 92
-                f: 97
-                f: 101
-                f: 105
-                f: 109
-                f: 113
-                f: 116
-                f: 120
-                f: 123
-                f: 127
-                f: 130
-                f: 134
-                f: 137
-                f: 140
-                f: 143
-                f: 146
-                f: 149
-                f: 152
-                f: 156
-                f: 159
-                f: 162
-                f: 165
-                f: 168
-                f: 171
-                f: 174
-                f: 177
-                f: 180
-                f: 183
-                f: 186
-                f: 188
-                f: 191
-                f: 194
-                f: 197
-                f: 200
-                f: 203
-                f: 205
-                f: 208
-                f: 211
-                f: 214
-                f: 217
-                f: 219
-                f: 222
-                f: 225
-                f: 228
-                f: 231
-                f: 233
-                f: 236
-                f: 239
-                f: 242
-                f: 244
-                f: 247
-                f: 250
-                f: 253
-                f: 255
-                f: 258
-                f: 261
-                f: 264
-                f: 266
-                f: 269
-                f: 272
-                f: 275
-                f: 278
-                f: 280
-                f: 283
-                f: 286
-                f: 289
-                f: 292
-                f: 294
-                f: 297
-                f: 300
-                f: 303
-                f: 305
-                f: 308
-                f: 311
-                f: 314
-                f: 317
-                f: 319
-                f: 322
-                f: 325
-                f: 327
-                f: 330
-                f: 333
-                f: 336
-                f: 339
-                f: 341
-                f: 344
-                f: 347
-                f: 350
-                f: 353
-                f: 355
-                f: 358
-                f: 361
-                f: 364
-                f: 367
-                f: 370
-                f: 373
-                f: 375
-                f: 378
-                f: 381
-                f: 384
-                f: 387
-                f: 390
-                f: 393
-                f: 396
-                f: 399
-                f: 401
-                f: 404
-                f: 407
-                f: 410
-                f: 413
-                f: 416
-                f: 419
-                f: 422
-                f: 425
-                f: 428
-                f: 431
-                f: 434
-                f: 437
-                f: 440
-                f: 443
-                f: 446
-                f: 449
-                f: 452
-                f: 455
-                f: 458
-                f: 461
-                f: 464
-                f: 467
-                f: 470
-                f: 473
-                f: 476
-                f: 479
-                f: 483
-                f: 486
-                f: 489
-                f: 492
-                f: 495
-                f: 498
-                f: 501
-                f: 504
-                f: 507
-                f: 511
-                f: 514
-                f: 517
-                f: 520
-                f: 523
-                f: 526
-                f: 530
-                f: 533
-                f: 536
-                f: 539
-                f: 542
-                f: 545
-                f: 549
-                f: 552
-                f: 555
-                f: 558
-                f: 562
-                f: 565
-                f: 569
-                f: 572
-                f: 575
-                f: 579
-                f: 582
-                f: 585
-                f: 589
-                f: 592
-                f: 595
-                f: 599
-                f: 602
-                f: 605
-                f: 609
-                f: 612
-                f: 616
-                f: 620
-                f: 623
-                f: 626
-                f: 630
-                f: 634
-                f: 637
-                f: 641
-                f: 644
-                f: 648
-                f: 651
-                f: 655
-                f: 658
-                f: 662
-                f: 665
-                f: 669
-                f: 672
-                f: 676
-                f: 680
-                f: 683
-                f: 687
-                f: 691
-                f: 694
-                f: 698
-                f: 702
-                f: 705
-                f: 709
-                f: 712
-                f: 716
-                f: 720
-                f: 724
-                f: 727
-                f: 731
-                f: 735
-                f: 739
-                f: 742
-                f: 746
-                f: 750
-                f: 754
-                f: 758
-                f: 761
-                f: 765
-                f: 769
-                f: 773
-                f: 777
-                f: 780
-                f: 784
-                f: 788
-                f: 792
-                f: 796
-                f: 800
-                f: 804
-                f: 808
-                f: 812
-                f: 816
-                f: 820
-                f: 823
-                f: 828
-                f: 832
-                f: 836
-                f: 840
-                f: 844
-                f: 848
-                f: 852
-                f: 856
-                f: 860
-                f: 864
-                f: 868
-                f: 873
-                f: 877
-                f: 881
-                f: 885
-                f: 889
-                f: 893
-                f: 897
-                f: 902
-                f: 906
-                f: 910
-                f: 914
-                f: 919
-                f: 923
-                f: 927
-                f: 931
-                f: 935
-                f: 940
-                f: 944
-                f: 948
-                f: 953
-                f: 957
-                f: 962
-                f: 966
-                f: 970
-                f: 975
-                f: 979
-                f: 984
-                f: 988
-                f: 993
-                f: 997
-                f: 1002
-                f: 1006
-                f: 1011
-                f: 1015
-                f: 1020
-                f: 1024
-                f: 1029
-                f: 1034
-                f: 1038
-                f: 1043
-                f: 1047
-                f: 1052
-                f: 1057
-                f: 1062
-                f: 1066
-                f: 1071
-                f: 1076
-                f: 1081
-                f: 1086
-                f: 1090
-                f: 1095
-                f: 1100
-                f: 1105
-                f: 1110
-                f: 1114
-                f: 1119
-                f: 1124
-                f: 1129
-                f: 1134
-                f: 1139
-                f: 1144
-                f: 1149
-                f: 1154
-                f: 1159
-                f: 1164
-                f: 1169
-                f: 1174
-                f: 1179
-                f: 1184
-                f: 1189
-                f: 1194
-                f: 1199
-                f: 1204
-                f: 1209
-                f: 1215
-                f: 1220
-                f: 1225
-                f: 1230
-                f: 1235
-                f: 1241
-                f: 1246
-                f: 1251
-                f: 1257
-                f: 1262
-                f: 1267
-                f: 1273
-                f: 1278
-                f: 1284
-                f: 1289
-                f: 1294
-                f: 1300
-                f: 1305
-                f: 1311
-                f: 1316
-                f: 1322
-                f: 1327
-                f: 1333
-                f: 1338
-                f: 1344
-                f: 1350
-                f: 1355
-                f: 1361
-                f: 1367
-                f: 1372
-                f: 1378
-                f: 1383
-                f: 1389
-                f: 1395
-                f: 1401
-                f: 1407
-                f: 1413
-                f: 1418
-                f: 1424
-                f: 1430
-                f: 1436
-                f: 1442
-                f: 1448
-                f: 1454
-                f: 1459
-                f: 1465
-                f: 1472
-                f: 1477
-                f: 1483
-                f: 1489
-                f: 1495
-                f: 1501
-                f: 1507
-                f: 1514
-                f: 1520
-                f: 1526
-                f: 1532
-                f: 1538
-                f: 1545
-                f: 1551
-                f: 1557
-                f: 1564
-                f: 1570
-                f: 1576
-                f: 1583
-                f: 1589
-                f: 1596
-                f: 1602
-                f: 1608
-                f: 1615
-                f: 1621
-                f: 1628
-                f: 1634
-                f: 1641
-                f: 1647
-                f: 1654
-                f: 1661
-                f: 1667
-                f: 1674
-                f: 1681
-                f: 1687
-                f: 1694
-                f: 1701
-                f: 1708
-                f: 1715
-                f: 1722
-                f: 1729
-                f: 1735
-                f: 1742
-                f: 1749
-                f: 1756
-                f: 1763
-                f: 1770
-                f: 1777
-                f: 1784
-                f: 1791
-                f: 1798
-                f: 1806
-                f: 1812
-                f: 1820
-                f: 1827
-                f: 1835
-                f: 1841
-                f: 1849
-                f: 1856
-                f: 1863
-                f: 1871
-                f: 1878
-                f: 1885
-                f: 1893
-                f: 1901
-                f: 1908
-                f: 1915
-                f: 1923
-                f: 1930
-                f: 1938
-                f: 1946
-                f: 1953
-                f: 1961
-                f: 1969
-                f: 1976
-                f: 1984
-                f: 1992
-                f: 2000
-                f: 2007
-                f: 2015
-                f: 2023
-                f: 2031
-                f: 2039
-                f: 2047
-                f: 2055
-                f: 2063
-                f: 2071
-                f: 2079
-                f: 2087
-                f: 2095
-                f: 2104
-                f: 2112
-                f: 2120
-                f: 2128
-                f: 2137
-                f: 2146
-                f: 2154
-                f: 2162
-                f: 2171
-                f: 2179
-                f: 2188
-                f: 2197
-                f: 2205
-                f: 2214
-                f: 2223
-                f: 2232
-                f: 2241
-                f: 2250
-                f: 2258
-                f: 2268
-                f: 2277
-                f: 2285
-                f: 2294
-                f: 2304
-                f: 2313
-                f: 2322
-                f: 2331
-                f: 2340
-                f: 2350
-                f: 2359
-                f: 2368
-                f: 2378
-                f: 2388
-                f: 2397
-                f: 2407
-                f: 2416
-                f: 2426
-                f: 2436
-                f: 2446
-                f: 2455
-                f: 2465
-                f: 2475
-                f: 2485
-                f: 2495
-                f: 2505
-                f: 2515
-                f: 2525
-                f: 2535
-                f: 2545
-                f: 2556
-                f: 2566
-                f: 2577
-                f: 2587
-                f: 2598
-                f: 2609
-                f: 2620
-                f: 2631
-                f: 2641
-                f: 2652
-                f: 2663
-                f: 2674
-                f: 2685
-                f: 2696
-                f: 2708
-                f: 2719
-                f: 2730
-                f: 2742
-                f: 2753
-                f: 2764
-                f: 2776
-                f: 2788
-                f: 2799
-                f: 2811
-                f: 2823
-                f: 2835
-                f: 2847
-                f: 2858
-                f: 2870
-                f: 2882
-                f: 2894
-                f: 2906
-                f: 2919
-                f: 2931
-                f: 2943
-                f: 2956
-                f: 2968
-                f: 2981
-                f: 2994
-                f: 3006
-                f: 3019
-                f: 3032
-                f: 3045
-                f: 3058
-                f: 3070
-                f: 3083
-                f: 3096
-                f: 3109
-                f: 3121
-                f: 3134
-                f: 3148
-                f: 3161
-                f: 3174
-                f: 3187
-                f: 3200
-                f: 3214
-                f: 3228
-                f: 3242
-                f: 3255
-                f: 3268
-                f: 3283
-                f: 3297
-                f: 3310
-                f: 3325
-                f: 3340
-                f: 3353
-                f: 3368
-                f: 3383
-                f: 3398
-                f: 3412
-                f: 3427
-                f: 3442
-                f: 3457
-                f: 3471
-                f: 3487
-                f: 3502
-                f: 3516
-                f: 3531
-                f: 3546
-                f: 3561
-                f: 3577
-                f: 3593
-                f: 3608
-                f: 3625
-                f: 3641
-                f: 3657
-                f: 3673
-                f: 3690
-                f: 3706
-                f: 3722
-                f: 3738
-                f: 3755
-                f: 3772
-                f: 3789
-                f: 3805
-                f: 3823
-                f: 3839
-                f: 3856
-                f: 3873
-                f: 3891
-                f: 3908
-                f: 3926
-                f: 3944
-                f: 3960
-                f: 3977
-                f: 3995
-                f: 4013
-                f: 4031
-                f: 4048
-                f: 4067
-                f: 4085
-                f: 4104
-                f: 4122
-                f: 4140
-                f: 4159
-                f: 4177
-                f: 4196
-                f: 4215
-                f: 4234
-                f: 4253
-                f: 4272
-                f: 4291
-                f: 4311
-                f: 4332
-                f: 4351
-                f: 4371
-                f: 4391
-                f: 4412
-                f: 4433
-                f: 4454
-                f: 4474
-                f: 4496
-                f: 4518
-                f: 4538
-                f: 4558
-                f: 4579
-                f: 4601
-                f: 4619
-                f: 4640
-                f: 4662
-                f: 4684
-                f: 4706
-                f: 4728
-                f: 4751
-                f: 4771
-                f: 4794
-                f: 4818
-                f: 4840
-                f: 4863
-                f: 4887
-                f: 4910
-                f: 4933
-                f: 4956
-                f: 4980
-                f: 5004
-                f: 5028
-                f: 5052
-                f: 5076
-                f: 5100
-                f: 5125
-                f: 5152
-                f: 5175
-                f: 5200
-                f: 5226
-                f: 5251
-                f: 5278
-                f: 5304
-                f: 5329
-                f: 5354
-                f: 5381
-                f: 5407
-                f: 5433
-                f: 5460
-                f: 5488
-                f: 5516
-                f: 5544
-                f: 5573
-                f: 5600
-                f: 5628
-                f: 5656
-                f: 5684
-                f: 5713
-                f: 5741
-                f: 5771
-                f: 5799
-                f: 5830
-                f: 5860
-                f: 5891
-                f: 5921
-                f: 5951
-                f: 5980
-                f: 6010
-                f: 6041
-                f: 6073
-                f: 6105
-                f: 6133
-                f: 6163
-                f: 6195
-                f: 6227
-                f: 6258
-                f: 6291
-                f: 6322
-                f: 6356
-                f: 6390
-                f: 6424
-                f: 6457
-                f: 6491
-                f: 6527
-                f: 6561
-                f: 6596
-                f: 6631
-                f: 6665
-                f: 6701
-                f: 6736
-                f: 6771
-                f: 6805
-                f: 6840
-                f: 6877
-                f: 6911
-                f: 6947
-                f: 6985
-                f: 7022
-                f: 7059
-                f: 7097
-                f: 7135
-                f: 7174
-                f: 7212
-                f: 7251
-                f: 7289
-                f: 7327
-                f: 7366
-                f: 7406
-                f: 7447
-                f: 7486
-                f: 7525
-                f: 7566
-                f: 7606
-                f: 7646
-                f: 7688
-                f: 7728
-                f: 7771
-                f: 7814
-                f: 7859
-                f: 7901
-                f: 7949
-                f: 7992
-                f: 8036
-                f: 8082
-                f: 8127
-                f: 8173
-                f: 8218
-                f: 8262
-                f: 8309
-                f: 8353
-                f: 8397
-                f: 8444
-                f: 8489
-                f: 8539
-                f: 8585
-                f: 8632
-                f: 8682
-                f: 8727
-                f: 8777
-                f: 8828
-                f: 8879
-                f: 8929
-                f: 8982
-                f: 9037
-                f: 9087
-                f: 9140
-                f: 9193
-                f: 9250
-                f: 9305
-                f: 9361
-                f: 9418
-                f: 9475
-                f: 9532
-                f: 9589
-                f: 9644
-                f: 9699
-                f: 9758
-                f: 9818
-                f: 9875
-                f: 9935
-                f: 9997
-                f: 10057
-                f: 10117
-                f: 10174
-                f: 10232
-                f: 10296
-                f: 10356
-                f: 10419
-                f: 10482
-                f: 10546
-                f: 10608
-                f: 10670
-                f: 10729
-                f: 10790
-                f: 10855
-                f: 10920
-                f: 10990
-                f: 11054
-                f: 11118
-                f: 11181
-                f: 11248
-                f: 11316
-                f: 11385
-                f: 11454
-                f: 11526
-                f: 11597
-                f: 11667
-                f: 11740
-                f: 11820
-                f: 11897
-                f: 11973
-                f: 12046
-                f: 12126
-                f: 12204
-                f: 12287
-                f: 12370
-                f: 12456
-                f: 12538
-                f: 12627
-                f: 12714
-                f: 12799
-                f: 12883
-                f: 12971
-                f: 13062
-                f: 13154
-                f: 13233
-                f: 13328
-                f: 13418
-                f: 13511
-                f: 13607
-                f: 13709
-                f: 13806
-                f: 13903
-                f: 14002
-                f: 14104
-                f: 14200
-                f: 14288
-                f: 14391
-                f: 14488
-                f: 14590
-                f: 14698
-                f: 14808
-                f: 14910
-                f: 15020
-                f: 15126
-                f: 15238
-                f: 15347
-                f: 15456
-                f: 15574
-                f: 15692
-                f: 15786
-                f: 15896
-                f: 16016
-                f: 16136
-                f: 16250
-                f: 16352
-                f: 16474
-                f: 16575
-                f: 16702
-                f: 16835
-                f: 16965
-                f: 17096
-                f: 17232
-                f: 17370
-                f: 17443
-                f: 17581
-                f: 17719
-                f: 17864
-                f: 17976
-                f: 18116
-                f: 18250
-                f: 18396
-                f: 18540
-                f: 18690
-                f: 18840
-                f: 18989
-                f: 19136
-                f: 19294
-                f: 19445
-                f: 19589
-                f: 19750
-                f: 19905
-                f: 20064
-                f: 20191
-                f: 20325
-                f: 20497
-                f: 20662
-                f: 20833
-                f: 20981
-                f: 21152
-                f: 21334
-                f: 21510
-                f: 21642
-                f: 21821
-                f: 22001
-                f: 22186
-                f: 22379
-                f: 22568
-                f: 22770
-                f: 22958
-                f: 23162
-                f: 23360
-                f: 23524
-                f: 23737
-                f: 23960
-                f: 24175
-                f: 24395
-                f: 24631
-                f: 24865
-                f: 25091
-                f: 25327
-                f: 25580
-                f: 25833
-                f: 26089
-                f: 26361
-                f: 26636
-                f: 26889
-                f: 27155
-                f: 27436
-                f: 27715
-                f: 28003
-                f: 28303
-                f: 28600
-                f: 28916
-                f: 29223
-                f: 29553
-                f: 29884
-                f: 30200
-                f: 30538
-                f: 30868
-                f: 31211
-                f: 31548
-                f: 31881
-                f: 32253
-                f: 32605
-                f: 32980
-                f: 33385
-                f: 33805
-                f: 34254
-                f: 34723
-                f: 35167
-                f: 35666
-                f: 36125
-                f: 36652
-                f: 37177
-                f: 37739
-                f: 38321
-                f: 38932
-                f: 39640
-                f: 40337
-                f: 41000
-                f: 41626
-                f: 42385
-                f: 43122
-                f: 43890
-                f: 44687
-                f: 45609
-                f: 46520
-                f: 47489
-                f: 48432
-                f: 49458
-                f: 50511
-                f: 51561
-                f: 52568
-                f: 53676
-                f: 54936
-                f: 56071
-                f: 57302
-                f: 58513
-                f: 59800
-                f: 61192
-                f: 62702
-                f: 64205
-                f: 65868
-                f: 67780
-                f: 69960
-                f: 72330
-                f: 74918
-                f: 77540
-                f: 80344
-                f: 83727
-                f: 87662
-                f: 93589
-                f: 101441
-                f: 110544
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_8/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_8/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_8/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_8/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_8/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_8/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_8/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_8/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_8/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_8/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_8/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_8/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_9/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_9/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_9/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_12"
-          input: "QNetwork/EncodingNetwork/lambda_9/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_9/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_9/zeros_like"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-                float_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_9/zeros_like"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_13"
-          input: "QNetwork/EncodingNetwork/lambda_10/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_10/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 13
-                f: 38
-                f: 56
-                f: 70
-                f: 82
-                f: 94
-                f: 104
-                f: 114
-                f: 123
-                f: 131
-                f: 139
-                f: 148
-                f: 152
-                f: 153
-                f: 158
-                f: 163
-                f: 170
-                f: 174
-                f: 178
-                f: 180
-                f: 183
-                f: 186
-                f: 188
-                f: 190
-                f: 192
-                f: 196
-                f: 198
-                f: 201
-                f: 205
-                f: 208
-                f: 212
-                f: 215
-                f: 219
-                f: 221
-                f: 225
-                f: 227
-                f: 229
-                f: 232
-                f: 233
-                f: 236
-                f: 239
-                f: 242
-                f: 245
-                f: 248
-                f: 250
-                f: 252
-                f: 254
-                f: 256
-                f: 259
-                f: 261
-                f: 264
-                f: 267
-                f: 270
-                f: 272
-                f: 275
-                f: 278
-                f: 280
-                f: 283
-                f: 285
-                f: 287
-                f: 290
-                f: 293
-                f: 295
-                f: 297
-                f: 300
-                f: 303
-                f: 305
-                f: 308
-                f: 311
-                f: 313
-                f: 316
-                f: 319
-                f: 322
-                f: 325
-                f: 329
-                f: 331
-                f: 333
-                f: 336
-                f: 338
-                f: 340
-                f: 343
-                f: 345
-                f: 347
-                f: 347
-                f: 349
-                f: 351
-                f: 353
-                f: 355
-                f: 357
-                f: 359
-                f: 361
-                f: 363
-                f: 365
-                f: 368
-                f: 369
-                f: 371
-                f: 373
-                f: 375
-                f: 377
-                f: 380
-                f: 382
-                f: 385
-                f: 387
-                f: 389
-                f: 391
-                f: 394
-                f: 396
-                f: 398
-                f: 400
-                f: 403
-                f: 405
-                f: 408
-                f: 410
-                f: 412
-                f: 415
-                f: 417
-                f: 420
-                f: 422
-                f: 425
-                f: 427
-                f: 429
-                f: 432
-                f: 434
-                f: 437
-                f: 439
-                f: 442
-                f: 444
-                f: 446
-                f: 449
-                f: 451
-                f: 454
-                f: 456
-                f: 458
-                f: 461
-                f: 463
-                f: 466
-                f: 469
-                f: 472
-                f: 474
-                f: 476
-                f: 479
-                f: 482
-                f: 483
-                f: 486
-                f: 489
-                f: 492
-                f: 495
-                f: 498
-                f: 500
-                f: 503
-                f: 505
-                f: 508
-                f: 510
-                f: 513
-                f: 516
-                f: 519
-                f: 522
-                f: 524
-                f: 528
-                f: 530
-                f: 533
-                f: 536
-                f: 539
-                f: 541
-                f: 544
-                f: 547
-                f: 550
-                f: 553
-                f: 556
-                f: 559
-                f: 561
-                f: 563
-                f: 567
-                f: 570
-                f: 572
-                f: 575
-                f: 577
-                f: 580
-                f: 584
-                f: 586
-                f: 589
-                f: 592
-                f: 595
-                f: 598
-                f: 601
-                f: 605
-                f: 607
-                f: 611
-                f: 613
-                f: 617
-                f: 620
-                f: 623
-                f: 626
-                f: 629
-                f: 632
-                f: 635
-                f: 639
-                f: 642
-                f: 645
-                f: 648
-                f: 651
-                f: 654
-                f: 657
-                f: 660
-                f: 662
-                f: 666
-                f: 669
-                f: 672
-                f: 676
-                f: 679
-                f: 682
-                f: 685
-                f: 688
-                f: 690
-                f: 693
-                f: 696
-                f: 699
-                f: 702
-                f: 705
-                f: 709
-                f: 712
-                f: 714
-                f: 718
-                f: 721
-                f: 724
-                f: 726
-                f: 728
-                f: 729
-                f: 731
-                f: 734
-                f: 737
-                f: 741
-                f: 745
-                f: 748
-                f: 750
-                f: 753
-                f: 756
-                f: 760
-                f: 763
-                f: 766
-                f: 770
-                f: 773
-                f: 776
-                f: 779
-                f: 782
-                f: 786
-                f: 788
-                f: 793
-                f: 796
-                f: 798
-                f: 802
-                f: 805
-                f: 808
-                f: 811
-                f: 815
-                f: 818
-                f: 820
-                f: 824
-                f: 827
-                f: 829
-                f: 832
-                f: 835
-                f: 838
-                f: 842
-                f: 846
-                f: 849
-                f: 854
-                f: 857
-                f: 860
-                f: 864
-                f: 867
-                f: 871
-                f: 875
-                f: 879
-                f: 882
-                f: 887
-                f: 890
-                f: 893
-                f: 897
-                f: 901
-                f: 905
-                f: 908
-                f: 911
-                f: 915
-                f: 918
-                f: 921
-                f: 925
-                f: 929
-                f: 932
-                f: 934
-                f: 937
-                f: 940
-                f: 943
-                f: 946
-                f: 950
-                f: 953
-                f: 956
-                f: 961
-                f: 965
-                f: 969
-                f: 973
-                f: 976
-                f: 980
-                f: 982
-                f: 985
-                f: 990
-                f: 994
-                f: 997
-                f: 1001
-                f: 1005
-                f: 1007
-                f: 1010
-                f: 1014
-                f: 1018
-                f: 1022
-                f: 1025
-                f: 1028
-                f: 1033
-                f: 1035
-                f: 1038
-                f: 1042
-                f: 1047
-                f: 1052
-                f: 1056
-                f: 1060
-                f: 1063
-                f: 1067
-                f: 1071
-                f: 1075
-                f: 1079
-                f: 1083
-                f: 1086
-                f: 1088
-                f: 1092
-                f: 1097
-                f: 1102
-                f: 1106
-                f: 1109
-                f: 1113
-                f: 1117
-                f: 1120
-                f: 1125
-                f: 1129
-                f: 1134
-                f: 1137
-                f: 1142
-                f: 1146
-                f: 1150
-                f: 1151
-                f: 1155
-                f: 1159
-                f: 1162
-                f: 1166
-                f: 1170
-                f: 1174
-                f: 1177
-                f: 1181
-                f: 1185
-                f: 1188
-                f: 1193
-                f: 1196
-                f: 1203
-                f: 1207
-                f: 1212
-                f: 1214
-                f: 1217
-                f: 1220
-                f: 1222
-                f: 1222
-                f: 1226
-                f: 1229
-                f: 1233
-                f: 1237
-                f: 1241
-                f: 1246
-                f: 1250
-                f: 1253
-                f: 1257
-                f: 1262
-                f: 1267
-                f: 1272
-                f: 1278
-                f: 1283
-                f: 1287
-                f: 1293
-                f: 1297
-                f: 1301
-                f: 1304
-                f: 1309
-                f: 1315
-                f: 1320
-                f: 1325
-                f: 1329
-                f: 1333
-                f: 1336
-                f: 1341
-                f: 1344
-                f: 1348
-                f: 1351
-                f: 1357
-                f: 1363
-                f: 1368
-                f: 1374
-                f: 1379
-                f: 1383
-                f: 1386
-                f: 1391
-                f: 1395
-                f: 1399
-                f: 1403
-                f: 1407
-                f: 1410
-                f: 1415
-                f: 1418
-                f: 1423
-                f: 1428
-                f: 1432
-                f: 1436
-                f: 1438
-                f: 1442
-                f: 1446
-                f: 1450
-                f: 1454
-                f: 1462
-                f: 1467
-                f: 1472
-                f: 1477
-                f: 1483
-                f: 1488
-                f: 1492
-                f: 1496
-                f: 1503
-                f: 1508
-                f: 1513
-                f: 1518
-                f: 1520
-                f: 1526
-                f: 1531
-                f: 1534
-                f: 1538
-                f: 1542
-                f: 1546
-                f: 1552
-                f: 1558
-                f: 1564
-                f: 1568
-                f: 1573
-                f: 1578
-                f: 1581
-                f: 1590
-                f: 1596
-                f: 1601
-                f: 1606
-                f: 1611
-                f: 1616
-                f: 1622
-                f: 1629
-                f: 1634
-                f: 1640
-                f: 1647
-                f: 1651
-                f: 1657
-                f: 1660
-                f: 1665
-                f: 1672
-                f: 1678
-                f: 1686
-                f: 1692
-                f: 1698
-                f: 1704
-                f: 1709
-                f: 1714
-                f: 1719
-                f: 1724
-                f: 1730
-                f: 1737
-                f: 1744
-                f: 1751
-                f: 1755
-                f: 1761
-                f: 1764
-                f: 1772
-                f: 1778
-                f: 1784
-                f: 1789
-                f: 1799
-                f: 1804
-                f: 1811
-                f: 1819
-                f: 1825
-                f: 1830
-                f: 1838
-                f: 1849
-                f: 1858
-                f: 1862
-                f: 1868
-                f: 1872
-                f: 1878
-                f: 1885
-                f: 1888
-                f: 1892
-                f: 1897
-                f: 1902
-                f: 1907
-                f: 1919
-                f: 1926
-                f: 1932
-                f: 1936
-                f: 1941
-                f: 1946
-                f: 1952
-                f: 1960
-                f: 1968
-                f: 1977
-                f: 1985
-                f: 1992
-                f: 1997
-                f: 2006
-                f: 2012
-                f: 2018
-                f: 2026
-                f: 2034
-                f: 2044
-                f: 2050
-                f: 2057
-                f: 2064
-                f: 2069
-                f: 2075
-                f: 2082
-                f: 2091
-                f: 2098
-                f: 2107
-                f: 2122
-                f: 2126
-                f: 2135
-                f: 2146
-                f: 2149
-                f: 2157
-                f: 2163
-                f: 2172
-                f: 2178
-                f: 2184
-                f: 2191
-                f: 2198
-                f: 2208
-                f: 2216
-                f: 2223
-                f: 2235
-                f: 2242
-                f: 2252
-                f: 2263
-                f: 2272
-                f: 2277
-                f: 2288
-                f: 2296
-                f: 2306
-                f: 2311
-                f: 2318
-                f: 2323
-                f: 2334
-                f: 2341
-                f: 2356
-                f: 2366
-                f: 2373
-                f: 2379
-                f: 2386
-                f: 2407
-                f: 2416
-                f: 2423
-                f: 2432
-                f: 2438
-                f: 2448
-                f: 2453
-                f: 2464
-                f: 2473
-                f: 2473
-                f: 2481
-                f: 2492
-                f: 2504
-                f: 2511
-                f: 2523
-                f: 2529
-                f: 2537
-                f: 2545
-                f: 2556
-                f: 2566
-                f: 2575
-                f: 2584
-                f: 2592
-                f: 2602
-                f: 2613
-                f: 2624
-                f: 2636
-                f: 2643
-                f: 2647
-                f: 2652
-                f: 2664
-                f: 2675
-                f: 2688
-                f: 2693
-                f: 2702
-                f: 2709
-                f: 2722
-                f: 2739
-                f: 2754
-                f: 2766
-                f: 2776
-                f: 2786
-                f: 2799
-                f: 2810
-                f: 2832
-                f: 2840
-                f: 2849
-                f: 2860
-                f: 2873
-                f: 2889
-                f: 2908
-                f: 2914
-                f: 2926
-                f: 2939
-                f: 2950
-                f: 2961
-                f: 2969
-                f: 2978
-                f: 2990
-                f: 2999
-                f: 3023
-                f: 3032
-                f: 3049
-                f: 3066
-                f: 3085
-                f: 3101
-                f: 3107
-                f: 3117
-                f: 3129
-                f: 3144
-                f: 3167
-                f: 3190
-                f: 3212
-                f: 3229
-                f: 3238
-                f: 3264
-                f: 3293
-                f: 3302
-                f: 3309
-                f: 3314
-                f: 3323
-                f: 3344
-                f: 3352
-                f: 3362
-                f: 3390
-                f: 3400
-                f: 3411
-                f: 3435
-                f: 3456
-                f: 3470
-                f: 3485
-                f: 3498
-                f: 3505
-                f: 3519
-                f: 3539
-                f: 3545
-                f: 3545
-                f: 3560
-                f: 3576
-                f: 3597
-                f: 3607
-                f: 3621
-                f: 3641
-                f: 3665
-                f: 3679
-                f: 3701
-                f: 3714
-                f: 3733
-                f: 3741
-                f: 3745
-                f: 3757
-                f: 3773
-                f: 3787
-                f: 3795
-                f: 3805
-                f: 3822
-                f: 3835
-                f: 3844
-                f: 3861
-                f: 3872
-                f: 3878
-                f: 3897
-                f: 3919
-                f: 3941
-                f: 3971
-                f: 4004
-                f: 4014
-                f: 4019
-                f: 4061
-                f: 4068
-                f: 4089
-                f: 4108
-                f: 4117
-                f: 4125
-                f: 4146
-                f: 4165
-                f: 4194
-                f: 4204
-                f: 4224
-                f: 4236
-                f: 4263
-                f: 4290
-                f: 4301
-                f: 4319
-                f: 4326
-                f: 4347
-                f: 4369
-                f: 4386
-                f: 4413
-                f: 4435
-                f: 4451
-                f: 4451
-                f: 4451
-                f: 4476
-                f: 4500
-                f: 4539
-                f: 4579
-                f: 4592
-                f: 4600
-                f: 4622
-                f: 4650
-                f: 4683
-                f: 4714
-                f: 4742
-                f: 4755
-                f: 4771
-                f: 4788
-                f: 4816
-                f: 4828
-                f: 4831
-                f: 4831
-                f: 4831
-                f: 4843
-                f: 4852
-                f: 4865
-                f: 4896
-                f: 4915
-                f: 4931
-                f: 4952
-                f: 4965
-                f: 4983
-                f: 5007
-                f: 5043
-                f: 5061
-                f: 5081
-                f: 5095
-                f: 5122
-                f: 5143
-                f: 5171
-                f: 5204
-                f: 5226
-                f: 5233
-                f: 5250
-                f: 5281
-                f: 5320
-                f: 5323
-                f: 5328
-                f: 5345
-                f: 5374
-                f: 5413
-                f: 5466
-                f: 5492
-                f: 5524
-                f: 5555
-                f: 5567
-                f: 5610
-                f: 5676
-                f: 5701
-                f: 5716
-                f: 5744
-                f: 5768
-                f: 5795
-                f: 5818
-                f: 5854
-                f: 5906
-                f: 5934
-                f: 5960
-                f: 5975
-                f: 5993
-                f: 6025
-                f: 6034
-                f: 6051
-                f: 6082
-                f: 6106
-                f: 6125
-                f: 6159
-                f: 6187
-                f: 6242
-                f: 6287
-                f: 6311
-                f: 6332
-                f: 6348
-                f: 6358
-                f: 6368
-                f: 6377
-                f: 6402
-                f: 6407
-                f: 6428
-                f: 6450
-                f: 6475
-                f: 6498
-                f: 6505
-                f: 6533
-                f: 6565
-                f: 6580
-                f: 6595
-                f: 6611
-                f: 6654
-                f: 6658
-                f: 6705
-                f: 6751
-                f: 6786
-                f: 6828
-                f: 6876
-                f: 6896
-                f: 6948
-                f: 6964
-                f: 7065
-                f: 7082
-                f: 7118
-                f: 7184
-                f: 7214
-                f: 7271
-                f: 7310
-                f: 7357
-                f: 7405
-                f: 7506
-                f: 7613
-                f: 7641
-                f: 7675
-                f: 7720
-                f: 7781
-                f: 7833
-                f: 7860
-                f: 7898
-                f: 7929
-                f: 8044
-                f: 8104
-                f: 8148
-                f: 8236
-                f: 8273
-                f: 8313
-                f: 8349
-                f: 8381
-                f: 8409
-                f: 8498
-                f: 8507
-                f: 8524
-                f: 8570
-                f: 8607
-                f: 8630
-                f: 8637
-                f: 8675
-                f: 8700
-                f: 8714
-                f: 8734
-                f: 8776
-                f: 8836
-                f: 8854
-                f: 8867
-                f: 8868
-                f: 9065
-                f: 9113
-                f: 9121
-                f: 9241
-                f: 9357
-                f: 9360
-                f: 9585
-                f: 9613
-                f: 9684
-                f: 9727
-                f: 9751
-                f: 9777
-                f: 9802
-                f: 9889
-                f: 9903
-                f: 9914
-                f: 9978
-                f: 10061
-                f: 10192
-                f: 10213
-                f: 10345
-                f: 10369
-                f: 10404
-                f: 10430
-                f: 10471
-                f: 10481
-                f: 10489
-                f: 10492
-                f: 10494
-                f: 10524
-                f: 10554
-                f: 10557
-                f: 10560
-                f: 10562
-                f: 10641
-                f: 10716
-                f: 10842
-                f: 10897
-                f: 10967
-                f: 11053
-                f: 11128
-                f: 11137
-                f: 11328
-                f: 11336
-                f: 11401
-                f: 11532
-                f: 11573
-                f: 11860
-                f: 11880
-                f: 12013
-                f: 12305
-                f: 12358
-                f: 12386
-                f: 12404
-                f: 12456
-                f: 12456
-                f: 12476
-                f: 12615
-                f: 12677
-                f: 12981
-                f: 13094
-                f: 13197
-                f: 13708
-                f: 13717
-                f: 13788
-                f: 14049
-                f: 14112
-                f: 14224
-                f: 14257
-                f: 14681
-                f: 14901
-                f: 15006
-                f: 15071
-                f: 15100
-                f: 15248
-                f: 15669
-                f: 15877
-                f: 15953
-                f: 15953
-                f: 16066
-                f: 16072
-                f: 16271
-                f: 16292
-                f: 16386
-                f: 16490
-                f: 16633
-                f: 16670
-                f: 16834
-                f: 16896
-                f: 17543
-                f: 17693
-                f: 17800
-                f: 17859
-                f: 18397
-                f: 18811
-                f: 18826
-                f: 18971
-                f: 19304
-                f: 19319
-                f: 19695
-                f: 20378
-                f: 20865
-                f: 21313
-                f: 21330
-                f: 22321
-                f: 22760
-                f: 22770
-                f: 23783
-                f: 23785
-                f: 24525
-                f: 24844
-                f: 24848
-                f: 24964
-                f: 24966
-                f: 27468
-                f: 27478
-                f: 27555
-                f: 27555
-                f: 28215
-                f: 28219
-                f: 28336
-                f: 28490
-                f: 30213
-                f: 30228
-                f: 30242
-                f: 34116
-                f: 43518
-                f: 43518
-                f: 43518
-                f: 43852
-                f: 43852
-                f: 43852
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_10/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_10/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_10/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_10/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_10/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_10/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_10/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_10/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_10/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_10/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_10/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_10/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/expand_dims/ExpandDims/dim"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/expand_dims/ExpandDims/dim"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/expand_dims/ExpandDims"
-          op: "ExpandDims"
-          input: "time_step_14"
-          input: "QNetwork/EncodingNetwork/lambda_11/expand_dims/ExpandDims/dim:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/expand_dims/ExpandDims"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/Bucketize"
-          op: "Bucketize"
-          input: "QNetwork/EncodingNetwork/lambda_11/expand_dims/ExpandDims:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "boundaries"
-            value {
-              list {
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 0
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 1
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 2
-                f: 3
-                f: 4
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/Bucketize"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/Cast"
-          op: "Cast"
-          input: "QNetwork/EncodingNetwork/lambda_11/Bucketize:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/Cast"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/truediv/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                }
-                float_val: 999
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/truediv/y"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/truediv"
-          op: "RealDiv"
-          input: "QNetwork/EncodingNetwork/lambda_11/Cast:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_11/truediv/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/truediv"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/Sqrt"
-          op: "Sqrt"
-          input: "QNetwork/EncodingNetwork/lambda_11/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/Sqrt"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/mul"
-          op: "Mul"
-          input: "QNetwork/EncodingNetwork/lambda_11/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_11/truediv:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/mul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/lambda_11/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda_11/truediv:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_11/Sqrt:y:0"
-          input: "QNetwork/EncodingNetwork/lambda_11/mul:z:0"
-          input: "QNetwork/EncodingNetwork/lambda_11/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 3
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/lambda_11/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/concatenate/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: 1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/concatenate/concat/axis"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/concatenate/concat"
-          op: "ConcatV2"
-          input: "QNetwork/EncodingNetwork/lambda/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_1/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_2/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_3/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_4/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_5/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_6/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_7/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_8/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_9/zeros_like:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_10/concat:output:0"
-          input: "QNetwork/EncodingNetwork/lambda_11/concat:output:0"
-          input: "QNetwork/EncodingNetwork/concatenate/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 12
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 34
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/concatenate/concat"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/flatten/Const"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 2
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                    size: 2
-                  }
-                }
-                tensor_content: "\377\377\377\377\"\000\000\000"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/flatten/Const"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/flatten/Reshape"
-          op: "Reshape"
-          input: "QNetwork/EncodingNetwork/concatenate/concat:output:0"
-          input: "QNetwork/EncodingNetwork/flatten/Const:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 34
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/flatten/Reshape"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense/MatMul/ReadVariableOp"
-          op: "ReadVariableOp"
-          input: "qnetwork_encodingnetwork_dense_matmul_readvariableop_resource"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 34
-                  }
-                  dim {
-                    size: 100
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense/MatMul/ReadVariableOp"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense/MatMul"
-          op: "MatMul"
-          input: "QNetwork/EncodingNetwork/flatten/Reshape:output:0"
-          input: "QNetwork/EncodingNetwork/dense/MatMul/ReadVariableOp:value:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 100
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense/MatMul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense/BiasAdd/ReadVariableOp"
-          op: "ReadVariableOp"
-          input: "qnetwork_encodingnetwork_dense_biasadd_readvariableop_resource"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 100
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense/BiasAdd/ReadVariableOp"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense/BiasAdd"
-          op: "BiasAdd"
-          input: "QNetwork/EncodingNetwork/dense/MatMul:product:0"
-          input: "QNetwork/EncodingNetwork/dense/BiasAdd/ReadVariableOp:value:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 100
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense/BiasAdd"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense/Relu"
-          op: "Relu"
-          input: "QNetwork/EncodingNetwork/dense/BiasAdd:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 100
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense/Relu"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense_1/MatMul/ReadVariableOp"
-          op: "ReadVariableOp"
-          input: "qnetwork_encodingnetwork_dense_1_matmul_readvariableop_resource"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 100
-                  }
-                  dim {
-                    size: 40
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense_1/MatMul/ReadVariableOp"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense_1/MatMul"
-          op: "MatMul"
-          input: "QNetwork/EncodingNetwork/dense/Relu:activations:0"
-          input: "QNetwork/EncodingNetwork/dense_1/MatMul/ReadVariableOp:value:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 40
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense_1/MatMul"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense_1/BiasAdd/ReadVariableOp"
-          op: "ReadVariableOp"
-          input: "qnetwork_encodingnetwork_dense_1_biasadd_readvariableop_resource"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 40
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense_1/BiasAdd/ReadVariableOp"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense_1/BiasAdd"
-          op: "BiasAdd"
-          input: "QNetwork/EncodingNetwork/dense_1/MatMul:product:0"
-          input: "QNetwork/EncodingNetwork/dense_1/BiasAdd/ReadVariableOp:value:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 40
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense_1/BiasAdd"
-          }
-        }
-        node_def {
-          name: "QNetwork/EncodingNetwork/dense_1/Relu"
-          op: "Relu"
-          input: "QNetwork/EncodingNetwork/dense_1/BiasAdd:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 40
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/EncodingNetwork/dense_1/Relu"
-          }
-        }
-        node_def {
-          name: "QNetwork/dense_2/MatMul/ReadVariableOp"
-          op: "ReadVariableOp"
-          input: "qnetwork_dense_2_matmul_readvariableop_resource"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 40
-                  }
-                  dim {
-                    size: 2
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/dense_2/MatMul/ReadVariableOp"
-          }
-        }
-        node_def {
-          name: "QNetwork/dense_2/MatMul"
-          op: "MatMul"
-          input: "QNetwork/EncodingNetwork/dense_1/Relu:activations:0"
-          input: "QNetwork/dense_2/MatMul/ReadVariableOp:value:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 2
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/dense_2/MatMul"
-          }
-        }
-        node_def {
-          name: "QNetwork/dense_2/BiasAdd/ReadVariableOp"
-          op: "ReadVariableOp"
-          input: "qnetwork_dense_2_biasadd_readvariableop_resource"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 2
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/dense_2/BiasAdd/ReadVariableOp"
-          }
-        }
-        node_def {
-          name: "QNetwork/dense_2/BiasAdd"
-          op: "BiasAdd"
-          input: "QNetwork/dense_2/MatMul:product:0"
-          input: "QNetwork/dense_2/BiasAdd/ReadVariableOp:value:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 2
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "QNetwork/dense_2/BiasAdd"
-          }
-        }
-        node_def {
-          name: "ShiftedCategorical_1/mode/ArgMax/dimension"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: -1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "ShiftedCategorical_1/mode/ArgMax/dimension"
-          }
-        }
-        node_def {
-          name: "ShiftedCategorical_1/mode/ArgMax"
-          op: "ArgMax"
-          input: "QNetwork/dense_2/BiasAdd:output:0"
-          input: "ShiftedCategorical_1/mode/ArgMax/dimension:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "ShiftedCategorical_1/mode/ArgMax"
-          }
-        }
-        node_def {
-          name: "add/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT64
-                tensor_shape {
-                }
-                int64_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "add/y"
-          }
-        }
-        node_def {
-          name: "add"
-          op: "AddV2"
-          input: "ShiftedCategorical_1/mode/ArgMax:output:0"
-          input: "add/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "add"
-          }
-        }
-        node_def {
-          name: "Deterministic/atol"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT64
-                tensor_shape {
-                }
-                int64_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic/atol"
-          }
-        }
-        node_def {
-          name: "Deterministic/rtol"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT64
-                tensor_shape {
-                }
-                int64_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic/rtol"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/sample_shape/x"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_FLOAT
-                tensor_shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/sample_shape/x"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/sample_shape"
-          op: "Cast"
-          input: "Deterministic_1/sample/sample_shape/x:output:0"
-          attr {
-            key: "DstT"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "SrcT"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/sample_shape"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/Shape"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                int_val: 1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/Shape"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/Shape_1"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/Shape_1"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/Shape_2"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/Shape_2"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/BroadcastArgs"
-          op: "BroadcastArgs"
-          input: "Deterministic_1/sample/Shape_1:output:0"
-          input: "Deterministic_1/sample/Shape_2:output:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/BroadcastArgs"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/BroadcastArgs_1"
-          op: "BroadcastArgs"
-          input: "Deterministic_1/sample/Shape:output:0"
-          input: "Deterministic_1/sample/BroadcastArgs:r0:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/BroadcastArgs_1"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/Const"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/Const"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/concat/values_0"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                int_val: 1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/concat/values_0"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/concat/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/concat/axis"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/concat"
-          op: "ConcatV2"
-          input: "Deterministic_1/sample/concat/values_0:output:0"
-          input: "Deterministic_1/sample/BroadcastArgs_1:r0:0"
-          input: "Deterministic_1/sample/Const:output:0"
-          input: "Deterministic_1/sample/concat/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 3
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 2
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/concat"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/BroadcastTo"
-          op: "BroadcastTo"
-          input: "add:z:0"
-          input: "Deterministic_1/sample/concat:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/BroadcastTo"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/Shape_3"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 2
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                    size: 2
-                  }
-                }
-                tensor_content: "\001\000\000\000\001\000\000\000"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/Shape_3"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/strided_slice/stack"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                int_val: 1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/strided_slice/stack"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/strided_slice/stack_1"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                int_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/strided_slice/stack_1"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/strided_slice/stack_2"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                int_val: 1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/strided_slice/stack_2"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/strided_slice"
-          op: "StridedSlice"
-          input: "Deterministic_1/sample/Shape_3:output:0"
-          input: "Deterministic_1/sample/strided_slice/stack:output:0"
-          input: "Deterministic_1/sample/strided_slice/stack_1:output:0"
-          input: "Deterministic_1/sample/strided_slice/stack_2:output:0"
-          attr {
-            key: "Index"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "end_mask"
-            value {
-              i: 1
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/strided_slice"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/concat_1/axis"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/concat_1/axis"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/concat_1"
-          op: "ConcatV2"
-          input: "Deterministic_1/sample/sample_shape:y:0"
-          input: "Deterministic_1/sample/strided_slice:output:0"
-          input: "Deterministic_1/sample/concat_1/axis:output:0"
-          attr {
-            key: "N"
-            value {
-              i: 2
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/concat_1"
-          }
-        }
-        node_def {
-          name: "Deterministic_1/sample/Reshape"
-          op: "Reshape"
-          input: "Deterministic_1/sample/BroadcastTo:output:0"
-          input: "Deterministic_1/sample/concat_1:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Deterministic_1/sample/Reshape"
-          }
-        }
-        node_def {
-          name: "clip_by_value/Minimum/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT64
-                tensor_shape {
-                }
-                int64_val: 1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "clip_by_value/Minimum/y"
-          }
-        }
-        node_def {
-          name: "clip_by_value/Minimum"
-          op: "Minimum"
-          input: "Deterministic_1/sample/Reshape:output:0"
-          input: "clip_by_value/Minimum/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "clip_by_value/Minimum"
-          }
-        }
-        node_def {
-          name: "clip_by_value/y"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT64
-                tensor_shape {
-                }
-                int64_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "clip_by_value/y"
-          }
-        }
-        node_def {
-          name: "clip_by_value"
-          op: "Maximum"
-          input: "clip_by_value/Minimum:z:0"
-          input: "clip_by_value/y:output:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "clip_by_value"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "clip_by_value:z:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 1
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 2
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 3
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 4
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 5
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 6
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 7
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 8
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 9
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 10
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 11
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 12
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 13
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 14
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 15
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 16
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 17
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 18
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 19
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 20
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_signature_wrapper_4619033"
-        }
-        node_def {
-          name: "PartitionedCall"
-          op: "PartitionedCall"
-          attr {
-            key: "Tin"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_function_with_signature_4619029"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "PartitionedCall"
-          }
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference__traced_save_4619143"
-          input_arg {
-            name: "file_prefix"
-            type: DT_STRING
-          }
-          input_arg {
-            name: "savev2_train_step_read_readvariableop"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "savev2_qnetwork_encodingnetwork_dense_kernel_read_readvariableop"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "savev2_qnetwork_encodingnetwork_dense_bias_read_readvariableop"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "savev2_qnetwork_encodingnetwork_dense_1_kernel_read_readvariableop"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "savev2_qnetwork_encodingnetwork_dense_1_bias_read_readvariableop"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "savev2_qnetwork_dense_2_kernel_read_readvariableop"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "savev2_qnetwork_dense_2_bias_read_readvariableop"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "savev2_1_const"
-            type: DT_STRING
-          }
-          output_arg {
-            name: "identity_1"
-            type: DT_STRING
-          }
-          is_stateful: true
-          control_output: "MergeV2Checkpoints"
-          control_output: "SaveV2"
-          control_output: "SaveV2_1"
-        }
-        node_def {
-          name: "StaticRegexFullMatch"
-          op: "StaticRegexFullMatch"
-          input: "file_prefix"
-          device: "/device:CPU:*"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "pattern"
-            value {
-              s: "^s3://.*"
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StaticRegexFullMatch"
-          }
-        }
-        node_def {
-          name: "Const"
-          op: "Const"
-          device: "/device:CPU:*"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                }
-                string_val: ".part"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Const"
-          }
-        }
-        node_def {
-          name: "Const_1"
-          op: "Const"
-          device: "/device:CPU:*"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                }
-                string_val: "_temp_f4c8d2e64931472295be68a11e57e937/part"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Const_1"
-          }
-        }
-        node_def {
-          name: "Select"
-          op: "Select"
-          input: "StaticRegexFullMatch:output:0"
-          input: "Const:output:0"
-          input: "Const_1:output:0"
-          device: "/device:CPU:*"
-          attr {
-            key: "T"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Select"
-          }
-        }
-        node_def {
-          name: "StringJoin"
-          op: "StringJoin"
-          input: "file_prefix"
-          input: "Select:output:0"
-          device: "/device:CPU:*"
-          attr {
-            key: "N"
-            value {
-              i: 2
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StringJoin"
-          }
-        }
-        node_def {
-          name: "num_shards"
-          op: "Const"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: 2
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "num_shards"
-          }
-        }
-        node_def {
-          name: "ShardedFilename/shard"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: 0
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "ShardedFilename/shard"
-          }
-        }
-        node_def {
-          name: "ShardedFilename"
-          op: "ShardedFilename"
-          input: "StringJoin:output:0"
-          input: "ShardedFilename/shard:output:0"
-          input: "num_shards:output:0"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "ShardedFilename"
-          }
-        }
-        node_def {
-          name: "SaveV2/tensor_names"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 7
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                  dim {
-                    size: 7
-                  }
-                }
-                string_val: "train_step/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/0/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/1/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/2/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/3/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/4/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/5/.ATTRIBUTES/VARIABLE_VALUE"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "SaveV2/tensor_names"
-          }
-        }
-        node_def {
-          name: "SaveV2/shape_and_slices"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 7
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                  dim {
-                    size: 7
-                  }
-                }
-                string_val: ""
-                string_val: ""
-                string_val: ""
-                string_val: ""
-                string_val: ""
-                string_val: ""
-                string_val: ""
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "SaveV2/shape_and_slices"
-          }
-        }
-        node_def {
-          name: "SaveV2"
-          op: "SaveV2"
-          input: "ShardedFilename:filename:0"
-          input: "SaveV2/tensor_names:output:0"
-          input: "SaveV2/shape_and_slices:output:0"
-          input: "savev2_train_step_read_readvariableop"
-          input: "savev2_qnetwork_encodingnetwork_dense_kernel_read_readvariableop"
-          input: "savev2_qnetwork_encodingnetwork_dense_bias_read_readvariableop"
-          input: "savev2_qnetwork_encodingnetwork_dense_1_kernel_read_readvariableop"
-          input: "savev2_qnetwork_encodingnetwork_dense_1_bias_read_readvariableop"
-          input: "savev2_qnetwork_dense_2_kernel_read_readvariableop"
-          input: "savev2_qnetwork_dense_2_bias_read_readvariableop"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtypes"
-            value {
-              list {
-                type: DT_INT64
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_FLOAT
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "SaveV2"
-          }
-        }
-        node_def {
-          name: "ShardedFilename_1/shard"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT32
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_INT32
-                tensor_shape {
-                }
-                int_val: 1
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "ShardedFilename_1/shard"
-          }
-        }
-        node_def {
-          name: "ShardedFilename_1"
-          op: "ShardedFilename"
-          input: "StringJoin:output:0"
-          input: "ShardedFilename_1/shard:output:0"
-          input: "num_shards:output:0"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "ShardedFilename_1"
-          }
-        }
-        node_def {
-          name: "SaveV2_1/tensor_names"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                string_val: "_CHECKPOINTABLE_OBJECT_GRAPH"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "SaveV2_1/tensor_names"
-          }
-        }
-        node_def {
-          name: "SaveV2_1/shape_and_slices"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                string_val: ""
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "SaveV2_1/shape_and_slices"
-          }
-        }
-        node_def {
-          name: "SaveV2_1"
-          op: "SaveV2"
-          input: "ShardedFilename_1:filename:0"
-          input: "SaveV2_1/tensor_names:output:0"
-          input: "SaveV2_1/shape_and_slices:output:0"
-          input: "savev2_1_const"
-          input: "^SaveV2"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtypes"
-            value {
-              list {
-                type: DT_STRING
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "SaveV2_1"
-          }
-        }
-        node_def {
-          name: "MergeV2Checkpoints/checkpoint_prefixes"
-          op: "Pack"
-          input: "ShardedFilename:filename:0"
-          input: "ShardedFilename_1:filename:0"
-          input: "^SaveV2"
-          input: "^SaveV2_1"
-          device: "/device:CPU:0"
-          attr {
-            key: "N"
-            value {
-              i: 2
-            }
-          }
-          attr {
-            key: "T"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 2
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "MergeV2Checkpoints/checkpoint_prefixes"
-          }
-        }
-        node_def {
-          name: "MergeV2Checkpoints"
-          op: "MergeV2Checkpoints"
-          input: "MergeV2Checkpoints/checkpoint_prefixes:output:0"
-          input: "file_prefix"
-          input: "^SaveV2_1"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "MergeV2Checkpoints"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "file_prefix"
-          input: "^MergeV2Checkpoints"
-          device: "/device:CPU:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        node_def {
-          name: "Identity_1"
-          op: "Identity"
-          input: "Identity:output:0"
-          input: "^MergeV2Checkpoints"
-          input: "^SaveV2"
-          input: "^SaveV2_1"
-          attr {
-            key: "T"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_1"
-          }
-        }
-        ret {
-          key: "identity_1"
-          value: "Identity_1:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-              }
-              shape {
-              }
-              shape {
-                dim {
-                  size: 34
-                }
-                dim {
-                  size: 100
-                }
-              }
-              shape {
-                dim {
-                  size: 100
-                }
-              }
-              shape {
-                dim {
-                  size: 100
-                }
-                dim {
-                  size: 40
-                }
-              }
-              shape {
-                dim {
-                  size: 40
-                }
-              }
-              shape {
-                dim {
-                  size: 40
-                }
-                dim {
-                  size: 2
-                }
-              }
-              shape {
-                dim {
-                  size: 2
-                }
-              }
-              shape {
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "MergeV2Checkpoints"
-          value: "MergeV2Checkpoints"
-        }
-        control_ret {
-          key: "SaveV2"
-          value: "SaveV2"
-        }
-        control_ret {
-          key: "SaveV2_1"
-          value: "SaveV2_1"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "file_prefix"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 1
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 2
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 34
-                    }
-                    dim {
-                      size: 100
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 3
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 100
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 4
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 100
-                    }
-                    dim {
-                      size: 40
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 5
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 40
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 6
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 40
-                    }
-                    dim {
-                      size: 2
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 7
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 2
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 8
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_function_722"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_signature_wrapper_4619026"
-          input_arg {
-            name: "callee_basic_block_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callee_conditionally_executed_blocks"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callee_users"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_basic_block_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_conditionally_executed_blocks"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_users"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callsite_height"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "cost_estimate"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "discount"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "edge_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "inlining_default"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "node_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "nr_ctant_params"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "reward"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "step_type"
-            type: DT_INT32
-          }
-          input_arg {
-            name: "unknown"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_0"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_1"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_2"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_3"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_4"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-          control_output: "StatefulPartitionedCall"
-        }
-        node_def {
-          name: "StatefulPartitionedCall"
-          op: "StatefulPartitionedCall"
-          input: "step_type"
-          input: "reward"
-          input: "discount"
-          input: "callee_basic_block_count"
-          input: "callee_conditionally_executed_blocks"
-          input: "callee_users"
-          input: "caller_basic_block_count"
-          input: "caller_conditionally_executed_blocks"
-          input: "caller_users"
-          input: "callsite_height"
-          input: "cost_estimate"
-          input: "edge_count"
-          input: "inlining_default"
-          input: "node_count"
-          input: "nr_ctant_params"
-          input: "unknown"
-          input: "unknown_0"
-          input: "unknown_1"
-          input: "unknown_2"
-          input: "unknown_3"
-          input: "unknown_4"
-          attr {
-            key: "Tin"
-            value {
-              list {
-                type: DT_INT32
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-                type: DT_INT64
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-                i: 15
-                i: 16
-                i: 17
-                i: 18
-                i: 19
-                i: 20
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_function_with_signature_4618993"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StatefulPartitionedCall"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "StatefulPartitionedCall:output:0"
-          input: "^StatefulPartitionedCall"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "StatefulPartitionedCall"
-          value: "StatefulPartitionedCall"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_basic_block_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 1
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_conditionally_executed_blocks"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 2
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_users"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 3
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_basic_block_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 4
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_conditionally_executed_blocks"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 5
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_users"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 6
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callsite_height"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 7
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "cost_estimate"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 8
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "discount"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 9
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "edge_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 10
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "inlining_default"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 11
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "node_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 12
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "nr_ctant_params"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 13
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "reward"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 14
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "step_type"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 15
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 16
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 17
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 18
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 19
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 20
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_function_with_signature_4618993"
-          input_arg {
-            name: "step_type"
-            type: DT_INT32
-          }
-          input_arg {
-            name: "reward"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "discount"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "callee_basic_block_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callee_conditionally_executed_blocks"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callee_users"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_basic_block_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_conditionally_executed_blocks"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_users"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callsite_height"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "cost_estimate"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "edge_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "inlining_default"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "node_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "nr_ctant_params"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "unknown"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_0"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_1"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_2"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_3"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_4"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-          control_output: "StatefulPartitionedCall"
-        }
-        node_def {
-          name: "StatefulPartitionedCall"
-          op: "StatefulPartitionedCall"
-          input: "step_type"
-          input: "reward"
-          input: "discount"
-          input: "callee_basic_block_count"
-          input: "callee_conditionally_executed_blocks"
-          input: "callee_users"
-          input: "caller_basic_block_count"
-          input: "caller_conditionally_executed_blocks"
-          input: "caller_users"
-          input: "callsite_height"
-          input: "cost_estimate"
-          input: "edge_count"
-          input: "inlining_default"
-          input: "node_count"
-          input: "nr_ctant_params"
-          input: "unknown"
-          input: "unknown_0"
-          input: "unknown_1"
-          input: "unknown_2"
-          input: "unknown_3"
-          input: "unknown_4"
-          attr {
-            key: "Tin"
-            value {
-              list {
-                type: DT_INT32
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-                type: DT_INT64
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-                i: 15
-                i: 16
-                i: 17
-                i: 18
-                i: 19
-                i: 20
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_polymorphic_action_fn_4618978"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StatefulPartitionedCall"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "StatefulPartitionedCall:output:0"
-          input: "^StatefulPartitionedCall"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "StatefulPartitionedCall"
-          value: "StatefulPartitionedCall"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "step_type"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 1
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "reward"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 2
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "discount"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 3
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_basic_block_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 4
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_conditionally_executed_blocks"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 5
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_users"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 6
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_basic_block_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 7
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_conditionally_executed_blocks"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 8
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_users"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 9
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callsite_height"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 10
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "cost_estimate"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 11
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "edge_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 12
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "inlining_default"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 13
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "node_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 14
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "nr_ctant_params"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 15
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 16
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 17
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 18
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 19
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 20
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_polymorphic_action_fn_4619080"
-          input_arg {
-            name: "time_step_step_type"
-            type: DT_INT32
-          }
-          input_arg {
-            name: "time_step_reward"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "time_step_discount"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "time_step_observation_callee_basic_block_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_callee_conditionally_executed_blocks"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_callee_users"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_caller_basic_block_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_caller_conditionally_executed_blocks"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_caller_users"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_callsite_height"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_cost_estimate"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_edge_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_inlining_default"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_node_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_observation_nr_ctant_params"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "unknown"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_0"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_1"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_2"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_3"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_4"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-          control_output: "StatefulPartitionedCall"
-        }
-        node_def {
-          name: "StatefulPartitionedCall"
-          op: "StatefulPartitionedCall"
-          input: "time_step_step_type"
-          input: "time_step_reward"
-          input: "time_step_discount"
-          input: "time_step_observation_callee_basic_block_count"
-          input: "time_step_observation_callee_conditionally_executed_blocks"
-          input: "time_step_observation_callee_users"
-          input: "time_step_observation_caller_basic_block_count"
-          input: "time_step_observation_caller_conditionally_executed_blocks"
-          input: "time_step_observation_caller_users"
-          input: "time_step_observation_callsite_height"
-          input: "time_step_observation_cost_estimate"
-          input: "time_step_observation_edge_count"
-          input: "time_step_observation_inlining_default"
-          input: "time_step_observation_node_count"
-          input: "time_step_observation_nr_ctant_params"
-          input: "unknown"
-          input: "unknown_0"
-          input: "unknown_1"
-          input: "unknown_2"
-          input: "unknown_3"
-          input: "unknown_4"
-          attr {
-            key: "Tin"
-            value {
-              list {
-                type: DT_INT32
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-                type: DT_INT64
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-                i: 15
-                i: 16
-                i: 17
-                i: 18
-                i: 19
-                i: 20
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_action_931"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StatefulPartitionedCall"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "StatefulPartitionedCall:output:0"
-          input: "^StatefulPartitionedCall"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "StatefulPartitionedCall"
-          value: "StatefulPartitionedCall"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/step_type"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 1
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/reward"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 2
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/discount"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 3
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/callee_basic_block_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 4
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/callee_conditionally_executed_blocks"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 5
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/callee_users"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 6
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/caller_basic_block_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 7
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/caller_conditionally_executed_blocks"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 8
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/caller_users"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 9
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/callsite_height"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 10
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/cost_estimate"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 11
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/edge_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 12
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/inlining_default"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 13
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/node_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 14
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step/observation/nr_ctant_params"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 15
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 16
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 17
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 18
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 19
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 20
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_function_with_signature_4619040"
-          input_arg {
-            name: "unknown"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-          control_output: "StatefulPartitionedCall"
-        }
-        node_def {
-          name: "StatefulPartitionedCall"
-          op: "StatefulPartitionedCall"
-          input: "unknown"
-          attr {
-            key: "Tin"
-            value {
-              list {
-                type: DT_RESOURCE
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-                type: DT_INT64
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-                i: 0
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_<lambda>_728"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StatefulPartitionedCall"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "StatefulPartitionedCall:output:0"
-          input: "^StatefulPartitionedCall"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "StatefulPartitionedCall"
-          value: "StatefulPartitionedCall"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_polymorphic_action_fn_4618978"
-          input_arg {
-            name: "time_step"
-            type: DT_INT32
-          }
-          input_arg {
-            name: "time_step_1"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "time_step_2"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "time_step_3"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_4"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_5"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_6"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_7"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_8"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_9"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_10"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_11"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_12"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_13"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "time_step_14"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "unknown"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_0"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_1"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_2"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_3"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_4"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-          control_output: "StatefulPartitionedCall"
-        }
-        node_def {
-          name: "StatefulPartitionedCall"
-          op: "StatefulPartitionedCall"
-          input: "time_step"
-          input: "time_step_1"
-          input: "time_step_2"
-          input: "time_step_3"
-          input: "time_step_4"
-          input: "time_step_5"
-          input: "time_step_6"
-          input: "time_step_7"
-          input: "time_step_8"
-          input: "time_step_9"
-          input: "time_step_10"
-          input: "time_step_11"
-          input: "time_step_12"
-          input: "time_step_13"
-          input: "time_step_14"
-          input: "unknown"
-          input: "unknown_0"
-          input: "unknown_1"
-          input: "unknown_2"
-          input: "unknown_3"
-          input: "unknown_4"
-          attr {
-            key: "Tin"
-            value {
-              list {
-                type: DT_INT32
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-                type: DT_INT64
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-                i: 15
-                i: 16
-                i: 17
-                i: 18
-                i: 19
-                i: 20
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_action_931"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StatefulPartitionedCall"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "StatefulPartitionedCall:output:0"
-          input: "^StatefulPartitionedCall"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "StatefulPartitionedCall"
-          value: "StatefulPartitionedCall"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 1
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 2
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 3
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 4
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 5
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 6
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 7
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 8
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 9
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 10
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 11
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 12
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 13
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 14
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "time_step"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 15
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 16
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 17
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 18
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 19
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 20
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_polymorphic_action_fn_946"
-          input_arg {
-            name: "step_type"
-            type: DT_INT32
-          }
-          input_arg {
-            name: "reward"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "discount"
-            type: DT_FLOAT
-          }
-          input_arg {
-            name: "callee_basic_block_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callee_conditionally_executed_blocks"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callee_users"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_basic_block_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_conditionally_executed_blocks"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "caller_users"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "callsite_height"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "cost_estimate"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "edge_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "inlining_default"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "node_count"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "nr_ctant_params"
-            type: DT_INT64
-          }
-          input_arg {
-            name: "unknown"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_0"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_1"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_2"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_3"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "unknown_4"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-          control_output: "StatefulPartitionedCall"
-        }
-        node_def {
-          name: "StatefulPartitionedCall"
-          op: "StatefulPartitionedCall"
-          input: "step_type"
-          input: "reward"
-          input: "discount"
-          input: "callee_basic_block_count"
-          input: "callee_conditionally_executed_blocks"
-          input: "callee_users"
-          input: "caller_basic_block_count"
-          input: "caller_conditionally_executed_blocks"
-          input: "caller_users"
-          input: "callsite_height"
-          input: "cost_estimate"
-          input: "edge_count"
-          input: "inlining_default"
-          input: "node_count"
-          input: "nr_ctant_params"
-          input: "unknown"
-          input: "unknown_0"
-          input: "unknown_1"
-          input: "unknown_2"
-          input: "unknown_3"
-          input: "unknown_4"
-          attr {
-            key: "Tin"
-            value {
-              list {
-                type: DT_INT32
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_INT64
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-                type: DT_RESOURCE
-              }
-            }
-          }
-          attr {
-            key: "Tout"
-            value {
-              list {
-                type: DT_INT64
-              }
-            }
-          }
-          attr {
-            key: "_collective_manager_ids"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "_read_only_resource_inputs"
-            value {
-              list {
-                i: 15
-                i: 16
-                i: 17
-                i: 18
-                i: 19
-                i: 20
-              }
-            }
-          }
-          attr {
-            key: "config_proto"
-            value {
-              s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0012\005*\0010J\0008\001"
-            }
-          }
-          attr {
-            key: "f"
-            value {
-              func {
-                name: "__inference_action_931"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "StatefulPartitionedCall"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "StatefulPartitionedCall:output:0"
-          input: "^StatefulPartitionedCall"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                dim {
-                  size: 1
-                }
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "StatefulPartitionedCall"
-          value: "StatefulPartitionedCall"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "step_type"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 1
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "reward"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 2
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "discount"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 3
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_basic_block_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 4
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_conditionally_executed_blocks"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 5
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callee_users"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 6
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_basic_block_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 7
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_conditionally_executed_blocks"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 8
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "caller_users"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 9
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "callsite_height"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 10
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "cost_estimate"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 11
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "edge_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 12
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "inlining_default"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 13
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "node_count"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 14
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "nr_ctant_params"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 15
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 16
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 17
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 18
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 19
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 20
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference__traced_restore_4619176"
-          input_arg {
-            name: "file_prefix"
-            type: DT_STRING
-          }
-          input_arg {
-            name: "assignvariableop_train_step"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "assignvariableop_1_qnetwork_encodingnetwork_dense_kernel"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "assignvariableop_2_qnetwork_encodingnetwork_dense_bias"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "assignvariableop_3_qnetwork_encodingnetwork_dense_1_kernel"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "assignvariableop_4_qnetwork_encodingnetwork_dense_1_bias"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "assignvariableop_5_qnetwork_dense_2_kernel"
-            type: DT_RESOURCE
-          }
-          input_arg {
-            name: "assignvariableop_6_qnetwork_dense_2_bias"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity_8"
-            type: DT_STRING
-          }
-          is_stateful: true
-          control_output: "AssignVariableOp"
-          control_output: "AssignVariableOp_1"
-          control_output: "AssignVariableOp_2"
-          control_output: "AssignVariableOp_3"
-          control_output: "AssignVariableOp_4"
-          control_output: "AssignVariableOp_5"
-          control_output: "AssignVariableOp_6"
-          control_output: "RestoreV2"
-          control_output: "RestoreV2_1"
-        }
-        node_def {
-          name: "RestoreV2/tensor_names"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 7
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                  dim {
-                    size: 7
-                  }
-                }
-                string_val: "train_step/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/0/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/1/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/2/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/3/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/4/.ATTRIBUTES/VARIABLE_VALUE"
-                string_val: "model_variables/5/.ATTRIBUTES/VARIABLE_VALUE"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "RestoreV2/tensor_names"
-          }
-        }
-        node_def {
-          name: "RestoreV2/shape_and_slices"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 7
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                  dim {
-                    size: 7
-                  }
-                }
-                string_val: ""
-                string_val: ""
-                string_val: ""
-                string_val: ""
-                string_val: ""
-                string_val: ""
-                string_val: ""
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "RestoreV2/shape_and_slices"
-          }
-        }
-        node_def {
-          name: "RestoreV2"
-          op: "RestoreV2"
-          input: "file_prefix"
-          input: "RestoreV2/tensor_names:output:0"
-          input: "RestoreV2/shape_and_slices:output:0"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-                shape {
-                  unknown_rank: true
-                }
-                shape {
-                  unknown_rank: true
-                }
-                shape {
-                  unknown_rank: true
-                }
-                shape {
-                  unknown_rank: true
-                }
-                shape {
-                  unknown_rank: true
-                }
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtypes"
-            value {
-              list {
-                type: DT_INT64
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_FLOAT
-                type: DT_FLOAT
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "RestoreV2"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "RestoreV2:tensors:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        node_def {
-          name: "AssignVariableOp"
-          op: "AssignVariableOp"
-          input: "assignvariableop_train_step"
-          input: "Identity:output:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT64
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "AssignVariableOp"
-          }
-        }
-        node_def {
-          name: "Identity_1"
-          op: "Identity"
-          input: "RestoreV2:tensors:1"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_1"
-          }
-        }
-        node_def {
-          name: "AssignVariableOp_1"
-          op: "AssignVariableOp"
-          input: "assignvariableop_1_qnetwork_encodingnetwork_dense_kernel"
-          input: "Identity_1:output:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "AssignVariableOp_1"
-          }
-        }
-        node_def {
-          name: "Identity_2"
-          op: "Identity"
-          input: "RestoreV2:tensors:2"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_2"
-          }
-        }
-        node_def {
-          name: "AssignVariableOp_2"
-          op: "AssignVariableOp"
-          input: "assignvariableop_2_qnetwork_encodingnetwork_dense_bias"
-          input: "Identity_2:output:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "AssignVariableOp_2"
-          }
-        }
-        node_def {
-          name: "Identity_3"
-          op: "Identity"
-          input: "RestoreV2:tensors:3"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_3"
-          }
-        }
-        node_def {
-          name: "AssignVariableOp_3"
-          op: "AssignVariableOp"
-          input: "assignvariableop_3_qnetwork_encodingnetwork_dense_1_kernel"
-          input: "Identity_3:output:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "AssignVariableOp_3"
-          }
-        }
-        node_def {
-          name: "Identity_4"
-          op: "Identity"
-          input: "RestoreV2:tensors:4"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_4"
-          }
-        }
-        node_def {
-          name: "AssignVariableOp_4"
-          op: "AssignVariableOp"
-          input: "assignvariableop_4_qnetwork_encodingnetwork_dense_1_bias"
-          input: "Identity_4:output:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "AssignVariableOp_4"
-          }
-        }
-        node_def {
-          name: "Identity_5"
-          op: "Identity"
-          input: "RestoreV2:tensors:5"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_5"
-          }
-        }
-        node_def {
-          name: "AssignVariableOp_5"
-          op: "AssignVariableOp"
-          input: "assignvariableop_5_qnetwork_dense_2_kernel"
-          input: "Identity_5:output:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "AssignVariableOp_5"
-          }
-        }
-        node_def {
-          name: "Identity_6"
-          op: "Identity"
-          input: "RestoreV2:tensors:6"
-          attr {
-            key: "T"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_6"
-          }
-        }
-        node_def {
-          name: "AssignVariableOp_6"
-          op: "AssignVariableOp"
-          input: "assignvariableop_6_qnetwork_dense_2_bias"
-          input: "Identity_6:output:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_FLOAT
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "AssignVariableOp_6"
-          }
-        }
-        node_def {
-          name: "RestoreV2_1/tensor_names"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                string_val: "_CHECKPOINTABLE_OBJECT_GRAPH"
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "RestoreV2_1/tensor_names"
-          }
-        }
-        node_def {
-          name: "RestoreV2_1/shape_and_slices"
-          op: "Const"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  dim {
-                    size: 1
-                  }
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "value"
-            value {
-              tensor {
-                dtype: DT_STRING
-                tensor_shape {
-                  dim {
-                    size: 1
-                  }
-                }
-                string_val: ""
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "RestoreV2_1/shape_and_slices"
-          }
-        }
-        node_def {
-          name: "RestoreV2_1"
-          op: "RestoreV2"
-          input: "file_prefix"
-          input: "RestoreV2_1/tensor_names:output:0"
-          input: "RestoreV2_1/shape_and_slices:output:0"
-          input: "^RestoreV2"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                  unknown_rank: true
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtypes"
-            value {
-              list {
-                type: DT_STRING
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "RestoreV2_1"
-          }
-        }
-        node_def {
-          name: "NoOp"
-          op: "NoOp"
-          device: "/device:CPU:0"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "NoOp"
-          }
-        }
-        node_def {
-          name: "Identity_7"
-          op: "Identity"
-          input: "file_prefix"
-          input: "^AssignVariableOp"
-          input: "^AssignVariableOp_1"
-          input: "^AssignVariableOp_2"
-          input: "^AssignVariableOp_3"
-          input: "^AssignVariableOp_4"
-          input: "^AssignVariableOp_5"
-          input: "^AssignVariableOp_6"
-          input: "^NoOp"
-          device: "/device:CPU:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_7"
-          }
-        }
-        node_def {
-          name: "Identity_8"
-          op: "Identity"
-          input: "Identity_7:output:0"
-          input: "^AssignVariableOp"
-          input: "^AssignVariableOp_1"
-          input: "^AssignVariableOp_2"
-          input: "^AssignVariableOp_3"
-          input: "^AssignVariableOp_4"
-          input: "^AssignVariableOp_5"
-          input: "^AssignVariableOp_6"
-          input: "^RestoreV2"
-          input: "^RestoreV2_1"
-          attr {
-            key: "T"
-            value {
-              type: DT_STRING
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity_8"
-          }
-        }
-        ret {
-          key: "identity_8"
-          value: "Identity_8:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        control_ret {
-          key: "AssignVariableOp"
-          value: "AssignVariableOp"
-        }
-        control_ret {
-          key: "AssignVariableOp_1"
-          value: "AssignVariableOp_1"
-        }
-        control_ret {
-          key: "AssignVariableOp_2"
-          value: "AssignVariableOp_2"
-        }
-        control_ret {
-          key: "AssignVariableOp_3"
-          value: "AssignVariableOp_3"
-        }
-        control_ret {
-          key: "AssignVariableOp_4"
-          value: "AssignVariableOp_4"
-        }
-        control_ret {
-          key: "AssignVariableOp_5"
-          value: "AssignVariableOp_5"
-        }
-        control_ret {
-          key: "AssignVariableOp_6"
-          value: "AssignVariableOp_6"
-        }
-        control_ret {
-          key: "RestoreV2"
-          value: "RestoreV2"
-        }
-        control_ret {
-          key: "RestoreV2_1"
-          value: "RestoreV2_1"
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-            attr {
-              key: "_user_specified_name"
-              value {
-                s: "file_prefix"
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 1
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 2
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 3
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 4
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 5
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 6
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 7
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-      function {
-        signature {
-          name: "__inference_<lambda>_728"
-          input_arg {
-            name: "readvariableop_resource"
-            type: DT_RESOURCE
-          }
-          output_arg {
-            name: "identity"
-            type: DT_INT64
-          }
-          is_stateful: true
-        }
-        node_def {
-          name: "ReadVariableOp"
-          op: "ReadVariableOp"
-          input: "readvariableop_resource"
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          attr {
-            key: "dtype"
-            value {
-              type: DT_INT64
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "ReadVariableOp"
-          }
-        }
-        node_def {
-          name: "Identity"
-          op: "Identity"
-          input: "ReadVariableOp:value:0"
-          attr {
-            key: "T"
-            value {
-              type: DT_INT64
-            }
-          }
-          attr {
-            key: "_output_shapes"
-            value {
-              list {
-                shape {
-                }
-              }
-            }
-          }
-          experimental_debug_info {
-            original_node_names: "Identity"
-          }
-        }
-        ret {
-          key: "identity"
-          value: "Identity:output:0"
-        }
-        attr {
-          key: "_input_shapes"
-          value {
-            list {
-              shape {
-                unknown_rank: true
-              }
-            }
-          }
-        }
-        arg_attr {
-          key: 0
-          value {
-            attr {
-              key: "_output_shapes"
-              value {
-                list {
-                  shape {
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    versions {
-      producer: 357
-      min_consumer: 12
-    }
-  }
-  saver_def {
-    filename_tensor_name: "saver_filename:0"
-    save_tensor_name: "StatefulPartitionedCall_2:0"
-    restore_op_name: "StatefulPartitionedCall_3"
-    version: V2
-  }
-  collection_def {
-    key: "saved_model_main_op"
-    value {
-      node_list {
-        value: "NoOp"
-      }
-    }
-  }
-  signature_def {
-    key: "__saved_model_init_op"
-    value {
-      outputs {
-        key: "__saved_model_init_op"
-        value {
-          name: "NoOp"
-          tensor_shape {
-            unknown_rank: true
-          }
-        }
-      }
-    }
-  }
-  signature_def {
-    key: "action"
-    value {
-      inputs {
-        key: "callee_basic_block_count"
-        value {
-          name: "action_callee_basic_block_count:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "callee_conditionally_executed_blocks"
-        value {
-          name: "action_callee_conditionally_executed_blocks:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "callee_users"
-        value {
-          name: "action_callee_users:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "caller_basic_block_count"
-        value {
-          name: "action_caller_basic_block_count:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "caller_conditionally_executed_blocks"
-        value {
-          name: "action_caller_conditionally_executed_blocks:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "caller_users"
-        value {
-          name: "action_caller_users:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "callsite_height"
-        value {
-          name: "action_callsite_height:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "cost_estimate"
-        value {
-          name: "action_cost_estimate:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "discount"
-        value {
-          name: "action_discount:0"
-          dtype: DT_FLOAT
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "edge_count"
-        value {
-          name: "action_edge_count:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "inlining_default"
-        value {
-          name: "action_inlining_default:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "node_count"
-        value {
-          name: "action_node_count:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "nr_ctant_params"
-        value {
-          name: "action_nr_ctant_params:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "reward"
-        value {
-          name: "action_reward:0"
-          dtype: DT_FLOAT
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      inputs {
-        key: "step_type"
-        value {
-          name: "action_step_type:0"
-          dtype: DT_INT32
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      outputs {
-        key: "inlining_decision"
-        value {
-          name: "StatefulPartitionedCall:0"
-          dtype: DT_INT64
-          tensor_shape {
-            dim {
-              size: 1
-            }
-          }
-        }
-      }
-      method_name: "tensorflow/serving/predict"
-    }
-  }
-  signature_def {
-    key: "get_initial_state"
-    value {
-      method_name: "tensorflow/serving/predict"
-    }
-  }
-  signature_def {
-    key: "get_train_step"
-    value {
-      outputs {
-        key: "int64"
-        value {
-          name: "StatefulPartitionedCall_1:0"
-          dtype: DT_INT64
-          tensor_shape {
-          }
-        }
-      }
-      method_name: "tensorflow/serving/predict"
-    }
-  }
-  object_graph_def {
-    nodes {
-      children {
-        node_id: 1
-        local_name: "_time_step_spec"
-      }
-      children {
-        node_id: 2
-        local_name: "_trajectory_spec"
-      }
-      children {
-        node_id: 3
-        local_name: "_wrapped_policy"
-      }
-      children {
-        node_id: 4
-        local_name: "train_step"
-      }
-      children {
-        node_id: 5
-        local_name: "model_variables"
-      }
-      children {
-        node_id: 6
-        local_name: "signatures"
-      }
-      children {
-        node_id: 210
-        local_name: "action"
-      }
-      children {
-        node_id: 211
-        local_name: "get_initial_state"
-      }
-      children {
-        node_id: 212
-        local_name: "get_train_step"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 7
-        local_name: "observation"
-      }
-      children {
-        node_id: 7
-        local_name: "3"
-      }
-      user_object {
-        identifier: "trackable_tuple_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 7
-        local_name: "observation"
-      }
-      children {
-        node_id: 7
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_tuple_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 8
-        local_name: "_q_network"
-      }
-      children {
-        node_id: 1
-        local_name: "_time_step_spec"
-      }
-      children {
-        node_id: 9
-        local_name: "_trajectory_spec"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      variable {
-        dtype: DT_INT64
-        shape {
-        }
-        name: "train_step"
-      }
-    }
-    nodes {
-      children {
-        node_id: 10
-        local_name: "0"
-      }
-      children {
-        node_id: 11
-        local_name: "1"
-      }
-      children {
-        node_id: 12
-        local_name: "2"
-      }
-      children {
-        node_id: 13
-        local_name: "3"
-      }
-      children {
-        node_id: 14
-        local_name: "4"
-      }
-      children {
-        node_id: 15
-        local_name: "5"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 213
-        local_name: "action"
-      }
-      children {
-        node_id: 214
-        local_name: "get_initial_state"
-      }
-      children {
-        node_id: 215
-        local_name: "get_train_step"
-      }
-      user_object {
-        identifier: "signature_map"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 16
-        local_name: "_input_tensor_spec"
-      }
-      children {
-        node_id: 17
-        local_name: "_encoder"
-      }
-      children {
-        node_id: 18
-        local_name: "_q_value_layer"
-      }
-      children {
-        node_id: 19
-        local_name: "variables"
-      }
-      children {
-        node_id: 20
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 21
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 22
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 216
-        local_name: "__call__"
-      }
-      children {
-        node_id: 217
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_network"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"QNetwork\", \"name\": \"QNetwork\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"config\": {\"layer was saved without config\": true}, \"is_graph_network\": false}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 7
-        local_name: "observation"
-      }
-      children {
-        node_id: 7
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_tuple_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      variable {
-        dtype: DT_FLOAT
-        shape {
-          dim {
-            size: 34
-          }
-          dim {
-            size: 100
-          }
-        }
-        trainable: true
-        name: "QNetwork/EncodingNetwork/dense/kernel"
-      }
-    }
-    nodes {
-      variable {
-        dtype: DT_FLOAT
-        shape {
-          dim {
-            size: 100
-          }
-        }
-        trainable: true
-        name: "QNetwork/EncodingNetwork/dense/bias"
-      }
-    }
-    nodes {
-      variable {
-        dtype: DT_FLOAT
-        shape {
-          dim {
-            size: 100
-          }
-          dim {
-            size: 40
-          }
-        }
-        trainable: true
-        name: "QNetwork/EncodingNetwork/dense_1/kernel"
-      }
-    }
-    nodes {
-      variable {
-        dtype: DT_FLOAT
-        shape {
-          dim {
-            size: 40
-          }
-        }
-        trainable: true
-        name: "QNetwork/EncodingNetwork/dense_1/bias"
-      }
-    }
-    nodes {
-      variable {
-        dtype: DT_FLOAT
-        shape {
-          dim {
-            size: 40
-          }
-          dim {
-            size: 2
-          }
-        }
-        trainable: true
-        name: "QNetwork/dense_2/kernel"
-      }
-    }
-    nodes {
-      variable {
-        dtype: DT_FLOAT
-        shape {
-          dim {
-            size: 2
-          }
-        }
-        trainable: true
-        name: "QNetwork/dense_2/bias"
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 23
-        local_name: "_input_tensor_spec"
-      }
-      children {
-        node_id: 24
-        local_name: "_preprocessing_nest"
-      }
-      children {
-        node_id: 25
-        local_name: "_flat_preprocessing_layers"
-      }
-      children {
-        node_id: 26
-        local_name: "_preprocessing_combiner"
-      }
-      children {
-        node_id: 27
-        local_name: "_postprocessing_layers"
-      }
-      children {
-        node_id: 28
-        local_name: "variables"
-      }
-      children {
-        node_id: 29
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 30
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 31
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 218
-        local_name: "__call__"
-      }
-      children {
-        node_id: 219
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_network"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"EncodingNetwork\", \"name\": \"EncodingNetwork\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"config\": {\"layer was saved without config\": true}, \"is_graph_network\": false}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 14
-        local_name: "kernel"
-      }
-      children {
-        node_id: 15
-        local_name: "bias"
-      }
-      children {
-        node_id: 32
-        local_name: "variables"
-      }
-      children {
-        node_id: 33
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 34
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 35
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 220
-        local_name: "__call__"
-      }
-      children {
-        node_id: 221
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Dense\", \"name\": \"dense_2\", \"trainable\": true, \"expects_training_arg\": false, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"dense_2\", \"trainable\": true, \"dtype\": \"float32\", \"units\": 2, \"activation\": \"linear\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"RandomUniform\", \"config\": {\"minval\": -0.03, \"maxval\": 0.03, \"seed\": null, \"dtype\": \"float32\"}}, \"bias_initializer\": {\"class_name\": \"Constant\", \"config\": {\"value\": -0.2, \"dtype\": \"float32\"}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}, \"input_spec\": {\"class_name\": \"InputSpec\", \"config\": {\"dtype\": null, \"shape\": null, \"ndim\": null, \"max_ndim\": null, \"min_ndim\": 2, \"axes\": {\"-1\": 40}}}, \"build_input_shape\": {\"class_name\": \"TensorShape\", \"items\": [0, 40]}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 10
-        local_name: "0"
-      }
-      children {
-        node_id: 11
-        local_name: "1"
-      }
-      children {
-        node_id: 12
-        local_name: "2"
-      }
-      children {
-        node_id: 13
-        local_name: "3"
-      }
-      children {
-        node_id: 14
-        local_name: "4"
-      }
-      children {
-        node_id: 15
-        local_name: "5"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 10
-        local_name: "0"
-      }
-      children {
-        node_id: 11
-        local_name: "1"
-      }
-      children {
-        node_id: 12
-        local_name: "2"
-      }
-      children {
-        node_id: 13
-        local_name: "3"
-      }
-      children {
-        node_id: 14
-        local_name: "4"
-      }
-      children {
-        node_id: 15
-        local_name: "5"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 36
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 19
-        local_name: "variables"
-      }
-      children {
-        node_id: 37
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 38
-        local_name: "metrics"
-      }
-      children {
-        node_id: 39
-        local_name: "layers"
-      }
-      children {
-        node_id: 20
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 40
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 21
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 216
-        local_name: "__call__"
-      }
-      children {
-        node_id: 217
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 217
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 41
-        local_name: "0"
-      }
-      children {
-        node_id: 42
-        local_name: "1"
-      }
-      children {
-        node_id: 43
-        local_name: "2"
-      }
-      children {
-        node_id: 44
-        local_name: "3"
-      }
-      children {
-        node_id: 45
-        local_name: "4"
-      }
-      children {
-        node_id: 46
-        local_name: "5"
-      }
-      children {
-        node_id: 47
-        local_name: "6"
-      }
-      children {
-        node_id: 48
-        local_name: "7"
-      }
-      children {
-        node_id: 49
-        local_name: "8"
-      }
-      children {
-        node_id: 50
-        local_name: "9"
-      }
-      children {
-        node_id: 51
-        local_name: "10"
-      }
-      children {
-        node_id: 52
-        local_name: "11"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 53
-        local_name: "variables"
-      }
-      children {
-        node_id: 54
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 55
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 56
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 222
-        local_name: "__call__"
-      }
-      children {
-        node_id: 223
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Concatenate\", \"name\": \"concatenate\", \"trainable\": true, \"expects_training_arg\": false, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"concatenate\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}, \"build_input_shape\": [{\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 1]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}, {\"class_name\": \"TensorShape\", \"items\": [0, 3]}]}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 57
-        local_name: "0"
-      }
-      children {
-        node_id: 58
-        local_name: "1"
-      }
-      children {
-        node_id: 59
-        local_name: "2"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 10
-        local_name: "0"
-      }
-      children {
-        node_id: 11
-        local_name: "1"
-      }
-      children {
-        node_id: 12
-        local_name: "2"
-      }
-      children {
-        node_id: 13
-        local_name: "3"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 10
-        local_name: "0"
-      }
-      children {
-        node_id: 11
-        local_name: "1"
-      }
-      children {
-        node_id: 12
-        local_name: "2"
-      }
-      children {
-        node_id: 13
-        local_name: "3"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 60
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 28
-        local_name: "variables"
-      }
-      children {
-        node_id: 61
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 62
-        local_name: "metrics"
-      }
-      children {
-        node_id: 63
-        local_name: "layers"
-      }
-      children {
-        node_id: 29
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 64
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 30
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 218
-        local_name: "__call__"
-      }
-      children {
-        node_id: 219
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 219
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 14
-        local_name: "0"
-      }
-      children {
-        node_id: 15
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 14
-        local_name: "0"
-      }
-      children {
-        node_id: 15
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 65
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 32
-        local_name: "variables"
-      }
-      children {
-        node_id: 66
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 67
-        local_name: "metrics"
-      }
-      children {
-        node_id: 68
-        local_name: "layers"
-      }
-      children {
-        node_id: 33
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 69
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 34
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 220
-        local_name: "__call__"
-      }
-      children {
-        node_id: 221
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 221
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 17
-        local_name: "0"
-      }
-      children {
-        node_id: 18
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 70
-        local_name: "variables"
-      }
-      children {
-        node_id: 71
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 72
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 73
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 224
-        local_name: "__call__"
-      }
-      children {
-        node_id: 225
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 10.0, 10.0, 11.0, 12.0, 13.0, 14.0, 14.0, 14.0, 16.0, 17.0, 19.0, 23.0, 27.0, 39.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 74
-        local_name: "variables"
-      }
-      children {
-        node_id: 75
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 76
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 77
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 226
-        local_name: "__call__"
-      }
-      children {
-        node_id: 227
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_1\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_1\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 7.0, 8.0, 8.0, 8.0, 8.0, 9.0, 10.0, 10.0, 10.0, 12.0, 12.0, 12.0, 14.0, 14.0, 18.0, 20.0, 23.0, 30.0, 41.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 78
-        local_name: "variables"
-      }
-      children {
-        node_id: 79
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 80
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 81
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 228
-        local_name: "__call__"
-      }
-      children {
-        node_id: 229
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_2\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_2\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 18.0, 18.0, 18.0, 18.0, 18.0, 19.0, 19.0, 19.0, 19.0, 19.0, 20.0, 20.0, 20.0, 20.0, 20.0, 21.0, 21.0, 21.0, 21.0, 21.0, 21.0, 21.0, 22.0, 22.0, 22.0, 22.0, 23.0, 23.0, 23.0, 24.0, 24.0, 24.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 26.0, 26.0, 26.0, 27.0, 27.0, 27.0, 27.0, 28.0, 28.0, 29.0, 29.0, 29.0, 29.0, 30.0, 30.0, 31.0, 31.0, 31.0, 31.0, 32.0, 32.0, 33.0, 33.0, 33.0, 34.0, 34.0, 34.0, 34.0, 35.0, 35.0, 36.0, 36.0, 37.0, 37.0, 37.0, 38.0, 38.0, 39.0, 39.0, 40.0, 40.0, 41.0, 41.0, 41.0, 42.0, 43.0, 43.0, 44.0, 44.0, 45.0, 45.0, 46.0, 46.0, 46.0, 47.0, 47.0, 48.0, 49.0, 49.0, 50.0, 50.0, 51.0, 52.0, 53.0, 53.0, 54.0, 55.0, 56.0, 57.0, 57.0, 58.0, 59.0, 60.0, 61.0, 61.0, 63.0, 63.0, 64.0, 65.0, 66.0, 67.0, 67.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 85.0, 86.0, 88.0, 89.0, 91.0, 92.0, 94.0, 96.0, 97.0, 99.0, 100.0, 101.0, 103.0, 105.0, 107.0, 109.0, 111.0, 113.0, 115.0, 118.0, 121.0, 123.0, 126.0, 128.0, 130.0, 133.0, 135.0, 137.0, 140.0, 143.0, 146.0, 148.0, 151.0, 154.0, 157.0, 161.0, 163.0, 166.0, 169.0, 173.0, 178.0, 183.0, 189.0, 193.0, 197.0, 202.0, 208.0, 213.0, 218.0, 223.0, 228.0, 233.0, 239.0, 245.0, 250.0, 257.0, 262.0, 269.0, 277.0, 284.0, 292.0, 300.0, 308.0, 319.0, 329.0, 340.0, 349.0, 359.0, 371.0, 382.0, 394.0, 410.0, 423.0, 435.0, 445.0, 462.0, 480.0, 492.0, 506.0, 519.0, 536.0, 557.0, 577.0, 598.0, 622.0, 655.0, 679.0, 707.0, 733.0, 751.0, 787.0, 814.0, 847.0, 897.0, 934.0, 997.0, 1062.0, 1111.0, 1181.0, 1275.0, 1385.0, 1465.0, 1603.0, 1769.0, 2057.0, 2257.0, 2803.0, 3468.0, 4417.0, 6538.0, 16126.0, 23446.0, 33536.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 82
-        local_name: "variables"
-      }
-      children {
-        node_id: 83
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 84
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 85
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 230
-        local_name: "__call__"
-      }
-      children {
-        node_id: 231
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_3\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_3\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 20.0, 20.0, 20.0, 20.0, 20.0, 21.0, 21.0, 21.0, 21.0, 21.0, 21.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 23.0, 23.0, 23.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 25.0, 25.0, 25.0, 25.0, 25.0, 26.0, 26.0, 26.0, 26.0, 27.0, 27.0, 27.0, 27.0, 27.0, 28.0, 28.0, 28.0, 29.0, 29.0, 29.0, 29.0, 30.0, 30.0, 30.0, 31.0, 31.0, 31.0, 32.0, 32.0, 32.0, 33.0, 33.0, 33.0, 34.0, 34.0, 34.0, 34.0, 35.0, 35.0, 35.0, 36.0, 36.0, 36.0, 37.0, 37.0, 37.0, 38.0, 38.0, 38.0, 38.0, 39.0, 39.0, 40.0, 40.0, 41.0, 41.0, 42.0, 43.0, 43.0, 44.0, 45.0, 45.0, 46.0, 47.0, 47.0, 48.0, 49.0, 49.0, 50.0, 50.0, 52.0, 52.0, 53.0, 54.0, 55.0, 55.0, 57.0, 58.0, 59.0, 60.0, 62.0, 64.0, 65.0, 66.0, 68.0, 70.0, 70.0, 70.0, 70.0, 70.0, 71.0, 73.0, 75.0, 76.0, 78.0, 81.0, 84.0, 86.0, 90.0, 94.0, 98.0, 101.0, 106.0, 111.0, 117.0, 123.0, 130.0, 138.0, 146.0, 157.0, 163.0, 176.0, 187.0, 198.0, 214.0, 227.0, 252.0, 280.0, 327.0, 395.0, 506.0, 671.0, 1025.0, 1971.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 86
-        local_name: "variables"
-      }
-      children {
-        node_id: 87
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 88
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 89
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 232
-        local_name: "__call__"
-      }
-      children {
-        node_id: 233
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_4\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_4\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 11.0, 11.0, 11.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 13.0, 13.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 19.0, 19.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 21.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 25.0, 26.0, 26.0, 26.0, 26.0, 26.0, 26.0, 26.0, 26.0, 26.0, 26.0, 27.0, 28.0, 28.0, 28.0, 28.0, 28.0, 29.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 31.0, 32.0, 32.0, 32.0, 32.0, 32.0, 34.0, 34.0, 34.0, 34.0, 34.0, 34.0, 35.0, 36.0, 36.0, 36.0, 37.0, 38.0, 38.0, 38.0, 39.0, 40.0, 40.0, 41.0, 42.0, 42.0, 43.0, 44.0, 44.0, 46.0, 46.0, 47.0, 48.0, 48.0, 50.0, 50.0, 52.0, 52.0, 54.0, 55.0, 55.0, 56.0, 57.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 62.0, 62.0, 64.0, 65.0, 66.0, 68.0, 70.0, 72.0, 74.0, 77.0, 80.0, 82.0, 86.0, 89.0, 92.0, 96.0, 99.0, 104.0, 108.0, 114.0, 119.0, 125.0, 131.0, 139.0, 146.0, 157.0, 167.0, 176.0, 188.0, 198.0, 215.0, 236.0, 262.0, 306.0, 376.0, 462.0, 596.0, 942.0, 1428.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 90
-        local_name: "variables"
-      }
-      children {
-        node_id: 91
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 92
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 93
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 234
-        local_name: "__call__"
-      }
-      children {
-        node_id: 235
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_5\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_5\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 10.0, 10.0, 11.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 18.0, 20.0, 23.0, 29.0, 38.0, 60.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 94
-        local_name: "variables"
-      }
-      children {
-        node_id: 95
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 96
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 97
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 236
-        local_name: "__call__"
-      }
-      children {
-        node_id: 237
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_6\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_6\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 
6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 11.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 13.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 17.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 19.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 21.0, 21.0, 21.0, 21.0, 21.0, 21.0, 21.0, 21.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 23.0, 23.0, 23.0, 23.0, 23.0, 23.0, 23.0, 24.0, 24.0, 24.0, 24.0, 24.0, 25.0, 25.0, 25.0, 25.0, 25.0, 26.0, 26.0, 26.0, 26.0, 27.0, 27.0, 27.0, 28.0, 28.0, 28.0, 29.0, 29.0, 30.0, 30.0, 30.0, 31.0, 31.0, 32.0, 32.0, 33.0, 33.0, 34.0, 35.0, 37.0, 38.0, 40.0, 46.0, 51.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 98
-        local_name: "variables"
-      }
-      children {
-        node_id: 99
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 100
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 101
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 238
-        local_name: "__call__"
-      }
-      children {
-        node_id: 239
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_7\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_7\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [-15035.0, -15030.0, -15025.0, -15000.0, -14985.0, -14945.0, -14745.0, -70.0, -55.0, -55.0, -50.0, -50.0, -50.0, -45.0, -45.0, -45.0, -45.0, -45.0, -45.0, -45.0, -45.0, -45.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -40.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, 
-35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -35.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -30.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -25.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -20.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -15.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -10.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, -5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 35.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 
40.0, 40.0, 45.0, 45.0, 45.0, 45.0, 45.0, 45.0, 45.0, 45.0, 45.0, 45.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 55.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 75.0, 75.0, 80.0, 80.0, 80.0, 85.0, 85.0, 85.0, 90.0, 90.0, 90.0, 90.0, 95.0, 95.0, 100.0, 100.0, 105.0, 110.0, 115.0, 120.0, 125.0, 125.0, 130.0, 140.0, 140.0, 145.0, 150.0, 155.0, 160.0, 160.0, 165.0, 170.0, 175.0, 180.0, 190.0, 200.0, 210.0, 215.0, 220.0, 220.0, 230.0, 235.0, 245.0, 250.0, 260.0, 275.0, 290.0, 305.0, 325.0, 350.0, 370.0, 390.0, 425.0, 460.0, 500.0, 560.0, 650.0, 790.0, 1025.0, 1600.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 102
-        local_name: "variables"
-      }
-      children {
-        node_id: 103
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 104
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 105
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 240
-        local_name: "__call__"
-      }
-      children {
-        node_id: 241
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_8\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_8\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [18.0, 29.0, 39.0, 48.0, 57.0, 64.0, 70.0, 76.0, 82.0, 87.0, 92.0, 97.0, 101.0, 105.0, 109.0, 113.0, 116.0, 120.0, 123.0, 127.0, 130.0, 134.0, 137.0, 140.0, 143.0, 146.0, 149.0, 152.0, 156.0, 159.0, 162.0, 165.0, 168.0, 171.0, 174.0, 177.0, 180.0, 183.0, 186.0, 188.0, 191.0, 194.0, 197.0, 200.0, 203.0, 205.0, 208.0, 211.0, 214.0, 217.0, 219.0, 222.0, 225.0, 228.0, 231.0, 233.0, 236.0, 239.0, 242.0, 244.0, 247.0, 250.0, 253.0, 255.0, 258.0, 261.0, 264.0, 266.0, 269.0, 272.0, 275.0, 278.0, 280.0, 283.0, 286.0, 289.0, 292.0, 294.0, 297.0, 300.0, 303.0, 305.0, 308.0, 311.0, 314.0, 317.0, 319.0, 322.0, 325.0, 327.0, 330.0, 333.0, 336.0, 339.0, 341.0, 344.0, 347.0, 350.0, 353.0, 355.0, 358.0, 361.0, 364.0, 367.0, 370.0, 373.0, 375.0, 378.0, 381.0, 384.0, 387.0, 390.0, 393.0, 396.0, 399.0, 401.0, 404.0, 407.0, 410.0, 413.0, 416.0, 419.0, 422.0, 425.0, 428.0, 431.0, 434.0, 437.0, 440.0, 443.0, 446.0, 449.0, 452.0, 455.0, 458.0, 461.0, 464.0, 467.0, 470.0, 473.0, 476.0, 479.0, 483.0, 486.0, 489.0, 492.0, 495.0, 498.0, 501.0, 504.0, 507.0, 511.0, 514.0, 517.0, 520.0, 523.0, 526.0, 530.0, 533.0, 536.0, 539.0, 542.0, 545.0, 549.0, 552.0, 555.0, 558.0, 562.0, 565.0, 569.0, 572.0, 575.0, 579.0, 582.0, 585.0, 589.0, 592.0, 595.0, 599.0, 602.0, 605.0, 609.0, 612.0, 616.0, 620.0, 623.0, 626.0, 630.0, 634.0, 637.0, 641.0, 644.0, 648.0, 651.0, 655.0, 658.0, 662.0, 665.0, 669.0, 672.0, 676.0, 680.0, 683.0, 687.0, 691.0, 694.0, 698.0, 702.0, 705.0, 709.0, 712.0, 716.0, 720.0, 724.0, 727.0, 731.0, 735.0, 739.0, 742.0, 746.0, 750.0, 754.0, 758.0, 761.0, 765.0, 769.0, 773.0, 777.0, 780.0, 784.0, 788.0, 792.0, 796.0, 800.0, 804.0, 808.0, 812.0, 816.0, 820.0, 823.0, 828.0, 832.0, 836.0, 840.0, 844.0, 848.0, 852.0, 856.0, 860.0, 864.0, 868.0, 873.0, 877.0, 881.0, 885.0, 889.0, 893.0, 897.0, 902.0, 906.0, 910.0, 914.0, 919.0, 923.0, 927.0, 931.0, 935.0, 940.0, 944.0, 948.0, 953.0, 957.0, 962.0, 966.0, 970.0, 975.0, 979.0, 984.0, 988.0, 993.0, 997.0, 1002.0, 1006.0, 1011.0, 1015.0, 1020.0, 1024.0, 1029.0, 1034.0, 1038.0, 1043.0, 1047.0, 1052.0, 1057.0, 1062.0, 1066.0, 1071.0, 1076.0, 1081.0, 1086.0, 1090.0, 1095.0, 1100.0, 1105.0, 1110.0, 1114.0, 1119.0, 1124.0, 1129.0, 1134.0, 1139.0, 1144.0, 1149.0, 1154.0, 1159.0, 1164.0, 1169.0, 1174.0, 1179.0, 1184.0, 1189.0, 1194.0, 1199.0, 1204.0, 
1209.0, 1215.0, 1220.0, 1225.0, 1230.0, 1235.0, 1241.0, 1246.0, 1251.0, 1257.0, 1262.0, 1267.0, 1273.0, 1278.0, 1284.0, 1289.0, 1294.0, 1300.0, 1305.0, 1311.0, 1316.0, 1322.0, 1327.0, 1333.0, 1338.0, 1344.0, 1350.0, 1355.0, 1361.0, 1367.0, 1372.0, 1378.0, 1383.0, 1389.0, 1395.0, 1401.0, 1407.0, 1413.0, 1418.0, 1424.0, 1430.0, 1436.0, 1442.0, 1448.0, 1454.0, 1459.0, 1465.0, 1472.0, 1477.0, 1483.0, 1489.0, 1495.0, 1501.0, 1507.0, 1514.0, 1520.0, 1526.0, 1532.0, 1538.0, 1545.0, 1551.0, 1557.0, 1564.0, 1570.0, 1576.0, 1583.0, 1589.0, 1596.0, 1602.0, 1608.0, 1615.0, 1621.0, 1628.0, 1634.0, 1641.0, 1647.0, 1654.0, 1661.0, 1667.0, 1674.0, 1681.0, 1687.0, 1694.0, 1701.0, 1708.0, 1715.0, 1722.0, 1729.0, 1735.0, 1742.0, 1749.0, 1756.0, 1763.0, 1770.0, 1777.0, 1784.0, 1791.0, 1798.0, 1806.0, 1812.0, 1820.0, 1827.0, 1835.0, 1841.0, 1849.0, 1856.0, 1863.0, 1871.0, 1878.0, 1885.0, 1893.0, 1901.0, 1908.0, 1915.0, 1923.0, 1930.0, 1938.0, 1946.0, 1953.0, 1961.0, 1969.0, 1976.0, 1984.0, 1992.0, 2000.0, 2007.0, 2015.0, 2023.0, 2031.0, 2039.0, 2047.0, 2055.0, 2063.0, 2071.0, 2079.0, 2087.0, 2095.0, 2104.0, 2112.0, 2120.0, 2128.0, 2137.0, 2146.0, 2154.0, 2162.0, 2171.0, 2179.0, 2188.0, 2197.0, 2205.0, 2214.0, 2223.0, 2232.0, 2241.0, 2250.0, 2258.0, 2268.0, 2277.0, 2285.0, 2294.0, 2304.0, 2313.0, 2322.0, 2331.0, 2340.0, 2350.0, 2359.0, 2368.0, 2378.0, 2388.0, 2397.0, 2407.0, 2416.0, 2426.0, 2436.0, 2446.0, 2455.0, 2465.0, 2475.0, 2485.0, 2495.0, 2505.0, 2515.0, 2525.0, 2535.0, 2545.0, 2556.0, 2566.0, 2577.0, 2587.0, 2598.0, 2609.0, 2620.0, 2631.0, 2641.0, 2652.0, 2663.0, 2674.0, 2685.0, 2696.0, 2708.0, 2719.0, 2730.0, 2742.0, 2753.0, 2764.0, 2776.0, 2788.0, 2799.0, 2811.0, 2823.0, 2835.0, 2847.0, 2858.0, 2870.0, 2882.0, 2894.0, 2906.0, 2919.0, 2931.0, 2943.0, 2956.0, 2968.0, 2981.0, 2994.0, 3006.0, 3019.0, 3032.0, 3045.0, 3058.0, 3070.0, 3083.0, 3096.0, 3109.0, 3121.0, 3134.0, 3148.0, 3161.0, 3174.0, 3187.0, 3200.0, 3214.0, 3228.0, 3242.0, 3255.0, 3268.0, 3283.0, 3297.0, 3310.0, 3325.0, 3340.0, 3353.0, 3368.0, 3383.0, 3398.0, 3412.0, 3427.0, 3442.0, 3457.0, 3471.0, 3487.0, 3502.0, 3516.0, 3531.0, 3546.0, 3561.0, 3577.0, 3593.0, 3608.0, 3625.0, 3641.0, 3657.0, 3673.0, 3690.0, 3706.0, 3722.0, 3738.0, 3755.0, 3772.0, 3789.0, 3805.0, 3823.0, 3839.0, 3856.0, 3873.0, 3891.0, 3908.0, 3926.0, 3944.0, 3960.0, 3977.0, 3995.0, 4013.0, 4031.0, 4048.0, 4067.0, 4085.0, 4104.0, 4122.0, 4140.0, 4159.0, 4177.0, 4196.0, 4215.0, 4234.0, 4253.0, 4272.0, 4291.0, 4311.0, 4332.0, 4351.0, 4371.0, 4391.0, 4412.0, 4433.0, 4454.0, 4474.0, 4496.0, 4518.0, 4538.0, 4558.0, 4579.0, 4601.0, 4619.0, 4640.0, 4662.0, 4684.0, 4706.0, 4728.0, 4751.0, 4771.0, 4794.0, 4818.0, 4840.0, 4863.0, 4887.0, 4910.0, 4933.0, 4956.0, 4980.0, 5004.0, 5028.0, 5052.0, 5076.0, 5100.0, 5125.0, 5152.0, 5175.0, 5200.0, 5226.0, 5251.0, 5278.0, 5304.0, 5329.0, 5354.0, 5381.0, 5407.0, 5433.0, 5460.0, 5488.0, 5516.0, 5544.0, 5573.0, 5600.0, 5628.0, 5656.0, 5684.0, 5713.0, 5741.0, 5771.0, 5799.0, 5830.0, 5860.0, 5891.0, 5921.0, 5951.0, 5980.0, 6010.0, 6041.0, 6073.0, 6105.0, 6133.0, 6163.0, 6195.0, 6227.0, 6258.0, 6291.0, 6322.0, 6356.0, 6390.0, 6424.0, 6457.0, 6491.0, 6527.0, 6561.0, 6596.0, 6631.0, 6665.0, 6701.0, 6736.0, 6771.0, 6805.0, 6840.0, 6877.0, 6911.0, 6947.0, 6985.0, 7022.0, 7059.0, 7097.0, 7135.0, 7174.0, 7212.0, 7251.0, 7289.0, 7327.0, 7366.0, 7406.0, 7447.0, 7486.0, 7525.0, 7566.0, 7606.0, 7646.0, 7688.0, 7728.0, 7771.0, 7814.0, 7859.0, 7901.0, 7949.0, 7992.0, 8036.0, 8082.0, 8127.0, 8173.0, 8218.0, 8262.0, 8309.0, 8353.0, 8397.0, 8444.0, 8489.0, 8539.0, 
8585.0, 8632.0, 8682.0, 8727.0, 8777.0, 8828.0, 8879.0, 8929.0, 8982.0, 9037.0, 9087.0, 9140.0, 9193.0, 9250.0, 9305.0, 9361.0, 9418.0, 9475.0, 9532.0, 9589.0, 9644.0, 9699.0, 9758.0, 9818.0, 9875.0, 9935.0, 9997.0, 10057.0, 10117.0, 10174.0, 10232.0, 10296.0, 10356.0, 10419.0, 10482.0, 10546.0, 10608.0, 10670.0, 10729.0, 10790.0, 10855.0, 10920.0, 10990.0, 11054.0, 11118.0, 11181.0, 11248.0, 11316.0, 11385.0, 11454.0, 11526.0, 11597.0, 11667.0, 11740.0, 11820.0, 11897.0, 11973.0, 12046.0, 12126.0, 12204.0, 12287.0, 12370.0, 12456.0, 12538.0, 12627.0, 12714.0, 12799.0, 12883.0, 12971.0, 13062.0, 13154.0, 13233.0, 13328.0, 13418.0, 13511.0, 13607.0, 13709.0, 13806.0, 13903.0, 14002.0, 14104.0, 14200.0, 14288.0, 14391.0, 14488.0, 14590.0, 14698.0, 14808.0, 14910.0, 15020.0, 15126.0, 15238.0, 15347.0, 15456.0, 15574.0, 15692.0, 15786.0, 15896.0, 16016.0, 16136.0, 16250.0, 16352.0, 16474.0, 16575.0, 16702.0, 16835.0, 16965.0, 17096.0, 17232.0, 17370.0, 17443.0, 17581.0, 17719.0, 17864.0, 17976.0, 18116.0, 18250.0, 18396.0, 18540.0, 18690.0, 18840.0, 18989.0, 19136.0, 19294.0, 19445.0, 19589.0, 19750.0, 19905.0, 20064.0, 20191.0, 20325.0, 20497.0, 20662.0, 20833.0, 20981.0, 21152.0, 21334.0, 21510.0, 21642.0, 21821.0, 22001.0, 22186.0, 22379.0, 22568.0, 22770.0, 22958.0, 23162.0, 23360.0, 23524.0, 23737.0, 23960.0, 24175.0, 24395.0, 24631.0, 24865.0, 25091.0, 25327.0, 25580.0, 25833.0, 26089.0, 26361.0, 26636.0, 26889.0, 27155.0, 27436.0, 27715.0, 28003.0, 28303.0, 28600.0, 28916.0, 29223.0, 29553.0, 29884.0, 30200.0, 30538.0, 30868.0, 31211.0, 31548.0, 31881.0, 32253.0, 32605.0, 32980.0, 33385.0, 33805.0, 34254.0, 34723.0, 35167.0, 35666.0, 36125.0, 36652.0, 37177.0, 37739.0, 38321.0, 38932.0, 39640.0, 40337.0, 41000.0, 41626.0, 42385.0, 43122.0, 43890.0, 44687.0, 45609.0, 46520.0, 47489.0, 48432.0, 49458.0, 50511.0, 51561.0, 52568.0, 53676.0, 54936.0, 56071.0, 57302.0, 58513.0, 59800.0, 61192.0, 62702.0, 64205.0, 65868.0, 67780.0, 69960.0, 72330.0, 74918.0, 77540.0, 80344.0, 83727.0, 87662.0, 93589.0, 101441.0, 110544.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 106
-        local_name: "variables"
-      }
-      children {
-        node_id: 107
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 108
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 109
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 242
-        local_name: "__call__"
-      }
-      children {
-        node_id: 243
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_9\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_9\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAgAAAAQAAAATAAAAcxgAAACIAHwAgwF9AXQAagF8AXQAagJkAY0CUwApAk4pAdoF\\nZHR5cGUpA9oCdGbaCnplcm9zX2xpa2XaB2Zsb2F0MzIpAtoDb2Jz2gxleHBhbmRlZF9vYnMpAdoO\\nZXhwYW5kX2RpbXNfb3CpAPr0L2V4cG9ydC9oZGEzL2JvcmdsZXQvbG9jYWxfcmFtX2ZzX2RpcnMv\\nMC55dW5kaV9tdXBwZXRfMF8xMjI3MDgzMy4xMy55dW5kaS4xOTQ3MzE0MTc5NjEuOGY0ZjlmOThj\\nYjdhMzA1NS9idWlsZF90YXJnZXRfdHJhaW5fcGFyX2Q5NzU3NTM3MDE2YTJlYjgvdHJhaW4ucGFy\\nL2dvb2dsZTMvbGVhcm5pbmcvc21hcnRjaG9pY2VzL3Jlc2VhcmNoL2NsaWVudHMvY29tcGlsZXJf\\nb3B0L3BvbGljeV90cmFpbmluZy9mZWF0dXJlX29wcy5wedoPZGlzY2FyZF9mZWF0dXJlJwAAAHME\\nAAAAAAEIAQ==\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 110
-        local_name: "variables"
-      }
-      children {
-        node_id: 111
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 112
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 113
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 244
-        local_name: "__call__"
-      }
-      children {
-        node_id: 245
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_10\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_10\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [13.0, 38.0, 56.0, 70.0, 82.0, 94.0, 104.0, 114.0, 123.0, 131.0, 139.0, 148.0, 152.0, 153.0, 158.0, 163.0, 170.0, 174.0, 178.0, 180.0, 183.0, 186.0, 188.0, 190.0, 192.0, 196.0, 198.0, 201.0, 205.0, 208.0, 212.0, 215.0, 219.0, 221.0, 225.0, 227.0, 229.0, 232.0, 233.0, 236.0, 239.0, 242.0, 245.0, 248.0, 250.0, 252.0, 254.0, 256.0, 259.0, 261.0, 264.0, 267.0, 270.0, 272.0, 275.0, 278.0, 280.0, 283.0, 285.0, 287.0, 290.0, 293.0, 295.0, 297.0, 300.0, 303.0, 305.0, 308.0, 311.0, 313.0, 316.0, 319.0, 322.0, 325.0, 329.0, 331.0, 333.0, 336.0, 338.0, 340.0, 343.0, 345.0, 347.0, 347.0, 349.0, 351.0, 353.0, 355.0, 357.0, 359.0, 361.0, 363.0, 365.0, 368.0, 369.0, 371.0, 373.0, 375.0, 377.0, 380.0, 382.0, 385.0, 387.0, 389.0, 391.0, 394.0, 396.0, 398.0, 400.0, 403.0, 405.0, 408.0, 410.0, 412.0, 415.0, 417.0, 420.0, 422.0, 425.0, 427.0, 429.0, 432.0, 434.0, 437.0, 439.0, 442.0, 444.0, 446.0, 449.0, 451.0, 454.0, 456.0, 458.0, 461.0, 463.0, 466.0, 469.0, 472.0, 474.0, 476.0, 479.0, 482.0, 483.0, 486.0, 489.0, 492.0, 495.0, 498.0, 500.0, 503.0, 505.0, 508.0, 510.0, 513.0, 516.0, 519.0, 522.0, 524.0, 528.0, 530.0, 533.0, 536.0, 539.0, 541.0, 544.0, 547.0, 550.0, 553.0, 556.0, 559.0, 561.0, 563.0, 567.0, 570.0, 572.0, 575.0, 577.0, 580.0, 584.0, 586.0, 589.0, 592.0, 595.0, 598.0, 601.0, 605.0, 607.0, 611.0, 613.0, 617.0, 620.0, 623.0, 626.0, 629.0, 632.0, 635.0, 639.0, 642.0, 645.0, 648.0, 651.0, 654.0, 657.0, 660.0, 662.0, 666.0, 669.0, 672.0, 676.0, 679.0, 682.0, 685.0, 688.0, 690.0, 693.0, 696.0, 699.0, 702.0, 705.0, 709.0, 712.0, 714.0, 718.0, 721.0, 724.0, 726.0, 728.0, 729.0, 731.0, 734.0, 737.0, 741.0, 745.0, 748.0, 750.0, 753.0, 756.0, 760.0, 763.0, 766.0, 770.0, 773.0, 776.0, 779.0, 782.0, 786.0, 788.0, 793.0, 796.0, 798.0, 802.0, 805.0, 808.0, 811.0, 815.0, 818.0, 820.0, 824.0, 827.0, 829.0, 832.0, 835.0, 838.0, 842.0, 846.0, 849.0, 854.0, 857.0, 860.0, 864.0, 867.0, 871.0, 875.0, 879.0, 882.0, 887.0, 890.0, 893.0, 897.0, 901.0, 905.0, 908.0, 911.0, 915.0, 918.0, 921.0, 925.0, 929.0, 932.0, 934.0, 937.0, 940.0, 943.0, 946.0, 950.0, 953.0, 956.0, 961.0, 965.0, 969.0, 973.0, 976.0, 980.0, 982.0, 985.0, 990.0, 994.0, 997.0, 1001.0, 1005.0, 1007.0, 1010.0, 1014.0, 1018.0, 1022.0, 1025.0, 1028.0, 1033.0, 1035.0, 1038.0, 1042.0, 1047.0, 1052.0, 1056.0, 1060.0, 1063.0, 1067.0, 
1071.0, 1075.0, 1079.0, 1083.0, 1086.0, 1088.0, 1092.0, 1097.0, 1102.0, 1106.0, 1109.0, 1113.0, 1117.0, 1120.0, 1125.0, 1129.0, 1134.0, 1137.0, 1142.0, 1146.0, 1150.0, 1151.0, 1155.0, 1159.0, 1162.0, 1166.0, 1170.0, 1174.0, 1177.0, 1181.0, 1185.0, 1188.0, 1193.0, 1196.0, 1203.0, 1207.0, 1212.0, 1214.0, 1217.0, 1220.0, 1222.0, 1222.0, 1226.0, 1229.0, 1233.0, 1237.0, 1241.0, 1246.0, 1250.0, 1253.0, 1257.0, 1262.0, 1267.0, 1272.0, 1278.0, 1283.0, 1287.0, 1293.0, 1297.0, 1301.0, 1304.0, 1309.0, 1315.0, 1320.0, 1325.0, 1329.0, 1333.0, 1336.0, 1341.0, 1344.0, 1348.0, 1351.0, 1357.0, 1363.0, 1368.0, 1374.0, 1379.0, 1383.0, 1386.0, 1391.0, 1395.0, 1399.0, 1403.0, 1407.0, 1410.0, 1415.0, 1418.0, 1423.0, 1428.0, 1432.0, 1436.0, 1438.0, 1442.0, 1446.0, 1450.0, 1454.0, 1462.0, 1467.0, 1472.0, 1477.0, 1483.0, 1488.0, 1492.0, 1496.0, 1503.0, 1508.0, 1513.0, 1518.0, 1520.0, 1526.0, 1531.0, 1534.0, 1538.0, 1542.0, 1546.0, 1552.0, 1558.0, 1564.0, 1568.0, 1573.0, 1578.0, 1581.0, 1590.0, 1596.0, 1601.0, 1606.0, 1611.0, 1616.0, 1622.0, 1629.0, 1634.0, 1640.0, 1647.0, 1651.0, 1657.0, 1660.0, 1665.0, 1672.0, 1678.0, 1686.0, 1692.0, 1698.0, 1704.0, 1709.0, 1714.0, 1719.0, 1724.0, 1730.0, 1737.0, 1744.0, 1751.0, 1755.0, 1761.0, 1764.0, 1772.0, 1778.0, 1784.0, 1789.0, 1799.0, 1804.0, 1811.0, 1819.0, 1825.0, 1830.0, 1838.0, 1849.0, 1858.0, 1862.0, 1868.0, 1872.0, 1878.0, 1885.0, 1888.0, 1892.0, 1897.0, 1902.0, 1907.0, 1919.0, 1926.0, 1932.0, 1936.0, 1941.0, 1946.0, 1952.0, 1960.0, 1968.0, 1977.0, 1985.0, 1992.0, 1997.0, 2006.0, 2012.0, 2018.0, 2026.0, 2034.0, 2044.0, 2050.0, 2057.0, 2064.0, 2069.0, 2075.0, 2082.0, 2091.0, 2098.0, 2107.0, 2122.0, 2126.0, 2135.0, 2146.0, 2149.0, 2157.0, 2163.0, 2172.0, 2178.0, 2184.0, 2191.0, 2198.0, 2208.0, 2216.0, 2223.0, 2235.0, 2242.0, 2252.0, 2263.0, 2272.0, 2277.0, 2288.0, 2296.0, 2306.0, 2311.0, 2318.0, 2323.0, 2334.0, 2341.0, 2356.0, 2366.0, 2373.0, 2379.0, 2386.0, 2407.0, 2416.0, 2423.0, 2432.0, 2438.0, 2448.0, 2453.0, 2464.0, 2473.0, 2473.0, 2481.0, 2492.0, 2504.0, 2511.0, 2523.0, 2529.0, 2537.0, 2545.0, 2556.0, 2566.0, 2575.0, 2584.0, 2592.0, 2602.0, 2613.0, 2624.0, 2636.0, 2643.0, 2647.0, 2652.0, 2664.0, 2675.0, 2688.0, 2693.0, 2702.0, 2709.0, 2722.0, 2739.0, 2754.0, 2766.0, 2776.0, 2786.0, 2799.0, 2810.0, 2832.0, 2840.0, 2849.0, 2860.0, 2873.0, 2889.0, 2908.0, 2914.0, 2926.0, 2939.0, 2950.0, 2961.0, 2969.0, 2978.0, 2990.0, 2999.0, 3023.0, 3032.0, 3049.0, 3066.0, 3085.0, 3101.0, 3107.0, 3117.0, 3129.0, 3144.0, 3167.0, 3190.0, 3212.0, 3229.0, 3238.0, 3264.0, 3293.0, 3302.0, 3309.0, 3314.0, 3323.0, 3344.0, 3352.0, 3362.0, 3390.0, 3400.0, 3411.0, 3435.0, 3456.0, 3470.0, 3485.0, 3498.0, 3505.0, 3519.0, 3539.0, 3545.0, 3545.0, 3560.0, 3576.0, 3597.0, 3607.0, 3621.0, 3641.0, 3665.0, 3679.0, 3701.0, 3714.0, 3733.0, 3741.0, 3745.0, 3757.0, 3773.0, 3787.0, 3795.0, 3805.0, 3822.0, 3835.0, 3844.0, 3861.0, 3872.0, 3878.0, 3897.0, 3919.0, 3941.0, 3971.0, 4004.0, 4014.0, 4019.0, 4061.0, 4068.0, 4089.0, 4108.0, 4117.0, 4125.0, 4146.0, 4165.0, 4194.0, 4204.0, 4224.0, 4236.0, 4263.0, 4290.0, 4301.0, 4319.0, 4326.0, 4347.0, 4369.0, 4386.0, 4413.0, 4435.0, 4451.0, 4451.0, 4451.0, 4476.0, 4500.0, 4539.0, 4579.0, 4592.0, 4600.0, 4622.0, 4650.0, 4683.0, 4714.0, 4742.0, 4755.0, 4771.0, 4788.0, 4816.0, 4828.0, 4831.0, 4831.0, 4831.0, 4843.0, 4852.0, 4865.0, 4896.0, 4915.0, 4931.0, 4952.0, 4965.0, 4983.0, 5007.0, 5043.0, 5061.0, 5081.0, 5095.0, 5122.0, 5143.0, 5171.0, 5204.0, 5226.0, 5233.0, 5250.0, 5281.0, 5320.0, 5323.0, 5328.0, 5345.0, 5374.0, 5413.0, 5466.0, 5492.0, 5524.0, 5555.0, 5567.0, 
5610.0, 5676.0, 5701.0, 5716.0, 5744.0, 5768.0, 5795.0, 5818.0, 5854.0, 5906.0, 5934.0, 5960.0, 5975.0, 5993.0, 6025.0, 6034.0, 6051.0, 6082.0, 6106.0, 6125.0, 6159.0, 6187.0, 6242.0, 6287.0, 6311.0, 6332.0, 6348.0, 6358.0, 6368.0, 6377.0, 6402.0, 6407.0, 6428.0, 6450.0, 6475.0, 6498.0, 6505.0, 6533.0, 6565.0, 6580.0, 6595.0, 6611.0, 6654.0, 6658.0, 6705.0, 6751.0, 6786.0, 6828.0, 6876.0, 6896.0, 6948.0, 6964.0, 7065.0, 7082.0, 7118.0, 7184.0, 7214.0, 7271.0, 7310.0, 7357.0, 7405.0, 7506.0, 7613.0, 7641.0, 7675.0, 7720.0, 7781.0, 7833.0, 7860.0, 7898.0, 7929.0, 8044.0, 8104.0, 8148.0, 8236.0, 8273.0, 8313.0, 8349.0, 8381.0, 8409.0, 8498.0, 8507.0, 8524.0, 8570.0, 8607.0, 8630.0, 8637.0, 8675.0, 8700.0, 8714.0, 8734.0, 8776.0, 8836.0, 8854.0, 8867.0, 8868.0, 9065.0, 9113.0, 9121.0, 9241.0, 9357.0, 9360.0, 9585.0, 9613.0, 9684.0, 9727.0, 9751.0, 9777.0, 9802.0, 9889.0, 9903.0, 9914.0, 9978.0, 10061.0, 10192.0, 10213.0, 10345.0, 10369.0, 10404.0, 10430.0, 10471.0, 10481.0, 10489.0, 10492.0, 10494.0, 10524.0, 10554.0, 10557.0, 10560.0, 10562.0, 10641.0, 10716.0, 10842.0, 10897.0, 10967.0, 11053.0, 11128.0, 11137.0, 11328.0, 11336.0, 11401.0, 11532.0, 11573.0, 11860.0, 11880.0, 12013.0, 12305.0, 12358.0, 12386.0, 12404.0, 12456.0, 12456.0, 12476.0, 12615.0, 12677.0, 12981.0, 13094.0, 13197.0, 13708.0, 13717.0, 13788.0, 14049.0, 14112.0, 14224.0, 14257.0, 14681.0, 14901.0, 15006.0, 15071.0, 15100.0, 15248.0, 15669.0, 15877.0, 15953.0, 15953.0, 16066.0, 16072.0, 16271.0, 16292.0, 16386.0, 16490.0, 16633.0, 16670.0, 16834.0, 16896.0, 17543.0, 17693.0, 17800.0, 17859.0, 18397.0, 18811.0, 18826.0, 18971.0, 19304.0, 19319.0, 19695.0, 20378.0, 20865.0, 21313.0, 21330.0, 22321.0, 22760.0, 22770.0, 23783.0, 23785.0, 24525.0, 24844.0, 24848.0, 24964.0, 24966.0, 27468.0, 27478.0, 27555.0, 27555.0, 28215.0, 28219.0, 28336.0, 28490.0, 30213.0, 30228.0, 30242.0, 34116.0, 43518.0, 43518.0, 43518.0, 43852.0, 43852.0, 43852.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 114
-        local_name: "variables"
-      }
-      children {
-        node_id: 115
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 116
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 117
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 246
-        local_name: "__call__"
-      }
-      children {
-        node_id: 247
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Lambda\", \"name\": \"lambda_11\", \"trainable\": true, \"expects_training_arg\": true, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"lambda_11\", \"trainable\": true, \"dtype\": \"float32\", \"function\": {\"class_name\": \"__tuple__\", \"items\": [\"4wEAAAAAAAAAAwAAAAUAAAATAAAAc0QAAACIAHwAgwF9AXQAagF0AmoDfAGIAYMCdABqBIMCdAWI\\nAYMBGwB9AnQAagZ8AnQAagd8AoMBfAJ8AhQAZwNkA2QCjQJTACkETukBAAAAKQHaBGF4aXPp////\\n/ykI2gJ0ZtoEY2FzdNoOY29udHJpYl9sYXllcnPaCWJ1Y2tldGl6ZdoHZmxvYXQzMtoDbGVu2gZj\\nb25jYXTaBHNxcnQpA9oDb2Jz2gxleHBhbmRlZF9vYnPaAXgpAtoOZXhwYW5kX2RpbXNfb3DaCHF1\\nYW50aWxlqQD69C9leHBvcnQvaGRhMy9ib3JnbGV0L2xvY2FsX3JhbV9mc19kaXJzLzAueXVuZGlf\\nbXVwcGV0XzBfMTIyNzA4MzMuMTMueXVuZGkuMTk0NzMxNDE3OTYxLjhmNGY5Zjk4Y2I3YTMwNTUv\\nYnVpbGRfdGFyZ2V0X3RyYWluX3Bhcl9kOTc1NzUzNzAxNmEyZWI4L3RyYWluLnBhci9nb29nbGUz\\nL2xlYXJuaW5nL3NtYXJ0Y2hvaWNlcy9yZXNlYXJjaC9jbGllbnRzL2NvbXBpbGVyX29wdC9wb2xp\\nY3lfdHJhaW5pbmcvZmVhdHVyZV9vcHMucHnaDW5vcm1hbGl6YXRpb24wAAAAcwoAAAAAAQgBBAEK\\nARAB\\n\", null, {\"class_name\": \"__tuple__\", \"items\": [{\"class_name\": \"ExpandDims\", \"config\": {\"name\": \"expand_dims\", \"trainable\": true, \"dtype\": \"float32\", \"axis\": -1}}, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 4.0]]}]}, \"function_type\": \"lambda\", \"module\": \"google3.learning.smartchoices.research.clients.compiler_opt.policy_training.feature_ops\", \"output_shape\": null, \"output_shape_type\": \"raw\", \"output_shape_module\": null, \"arguments\": {}}}"
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 118
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 53
-        local_name: "variables"
-      }
-      children {
-        node_id: 119
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 120
-        local_name: "metrics"
-      }
-      children {
-        node_id: 121
-        local_name: "layers"
-      }
-      children {
-        node_id: 54
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 122
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 55
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 222
-        local_name: "__call__"
-      }
-      children {
-        node_id: 223
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 223
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 123
-        local_name: "variables"
-      }
-      children {
-        node_id: 124
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 125
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 126
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 248
-        local_name: "__call__"
-      }
-      children {
-        node_id: 249
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Flatten\", \"name\": \"flatten\", \"trainable\": true, \"expects_training_arg\": false, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"flatten\", \"trainable\": true, \"dtype\": \"float32\", \"data_format\": \"channels_last\"}, \"input_spec\": {\"class_name\": \"InputSpec\", \"config\": {\"dtype\": null, \"shape\": null, \"ndim\": null, \"max_ndim\": null, \"min_ndim\": 1, \"axes\": {}}}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 10
-        local_name: "kernel"
-      }
-      children {
-        node_id: 11
-        local_name: "bias"
-      }
-      children {
-        node_id: 127
-        local_name: "variables"
-      }
-      children {
-        node_id: 128
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 129
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 130
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 250
-        local_name: "__call__"
-      }
-      children {
-        node_id: 251
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Dense\", \"name\": \"dense\", \"trainable\": true, \"expects_training_arg\": false, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"dense\", \"trainable\": true, \"dtype\": \"float32\", \"units\": 100, \"activation\": \"relu\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"VarianceScaling\", \"config\": {\"scale\": 2.0, \"mode\": \"fan_in\", \"distribution\": \"truncated_normal\", \"seed\": null, \"dtype\": \"float32\"}}, \"bias_initializer\": {\"class_name\": \"Zeros\", \"config\": {}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}, \"input_spec\": {\"class_name\": \"InputSpec\", \"config\": {\"dtype\": null, \"shape\": null, \"ndim\": null, \"max_ndim\": null, \"min_ndim\": 2, \"axes\": {\"-1\": 34}}}, \"build_input_shape\": {\"class_name\": \"TensorShape\", \"items\": [0, 34]}}"
-      }
-    }
-    nodes {
-      children {
-        node_id: 12
-        local_name: "kernel"
-      }
-      children {
-        node_id: 13
-        local_name: "bias"
-      }
-      children {
-        node_id: 131
-        local_name: "variables"
-      }
-      children {
-        node_id: 132
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 133
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 134
-        local_name: "keras_api"
-      }
-      children {
-        node_id: 252
-        local_name: "__call__"
-      }
-      children {
-        node_id: 253
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      user_object {
-        identifier: "_tf_keras_layer"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-        metadata: "{\"class_name\": \"Dense\", \"name\": \"dense_1\", \"trainable\": true, \"expects_training_arg\": false, \"dtype\": \"float32\", \"batch_input_shape\": null, \"stateful\": false, \"config\": {\"name\": \"dense_1\", \"trainable\": true, \"dtype\": \"float32\", \"units\": 40, \"activation\": \"relu\", \"use_bias\": true, \"kernel_initializer\": {\"class_name\": \"VarianceScaling\", \"config\": {\"scale\": 2.0, \"mode\": \"fan_in\", \"distribution\": \"truncated_normal\", \"seed\": null, \"dtype\": \"float32\"}}, \"bias_initializer\": {\"class_name\": \"Zeros\", \"config\": {}}, \"kernel_regularizer\": null, \"bias_regularizer\": null, \"activity_regularizer\": null, \"kernel_constraint\": null, \"bias_constraint\": null}, \"input_spec\": {\"class_name\": \"InputSpec\", \"config\": {\"dtype\": null, \"shape\": null, \"ndim\": null, \"max_ndim\": null, \"min_ndim\": 2, \"axes\": {\"-1\": 100}}}, \"build_input_shape\": {\"class_name\": \"TensorShape\", \"items\": [0, 100]}}"
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 41
-        local_name: "0"
-      }
-      children {
-        node_id: 42
-        local_name: "1"
-      }
-      children {
-        node_id: 43
-        local_name: "2"
-      }
-      children {
-        node_id: 44
-        local_name: "3"
-      }
-      children {
-        node_id: 45
-        local_name: "4"
-      }
-      children {
-        node_id: 46
-        local_name: "5"
-      }
-      children {
-        node_id: 47
-        local_name: "6"
-      }
-      children {
-        node_id: 48
-        local_name: "7"
-      }
-      children {
-        node_id: 49
-        local_name: "8"
-      }
-      children {
-        node_id: 50
-        local_name: "9"
-      }
-      children {
-        node_id: 51
-        local_name: "10"
-      }
-      children {
-        node_id: 52
-        local_name: "11"
-      }
-      children {
-        node_id: 26
-        local_name: "12"
-      }
-      children {
-        node_id: 57
-        local_name: "13"
-      }
-      children {
-        node_id: 58
-        local_name: "14"
-      }
-      children {
-        node_id: 59
-        local_name: "15"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 135
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 70
-        local_name: "variables"
-      }
-      children {
-        node_id: 136
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 137
-        local_name: "metrics"
-      }
-      children {
-        node_id: 138
-        local_name: "layers"
-      }
-      children {
-        node_id: 71
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 139
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 72
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 224
-        local_name: "__call__"
-      }
-      children {
-        node_id: 225
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 225
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 140
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 74
-        local_name: "variables"
-      }
-      children {
-        node_id: 141
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 142
-        local_name: "metrics"
-      }
-      children {
-        node_id: 143
-        local_name: "layers"
-      }
-      children {
-        node_id: 75
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 144
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 76
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 226
-        local_name: "__call__"
-      }
-      children {
-        node_id: 227
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 227
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 145
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 78
-        local_name: "variables"
-      }
-      children {
-        node_id: 146
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 147
-        local_name: "metrics"
-      }
-      children {
-        node_id: 148
-        local_name: "layers"
-      }
-      children {
-        node_id: 79
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 149
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 80
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 228
-        local_name: "__call__"
-      }
-      children {
-        node_id: 229
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 229
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 150
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 82
-        local_name: "variables"
-      }
-      children {
-        node_id: 151
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 152
-        local_name: "metrics"
-      }
-      children {
-        node_id: 153
-        local_name: "layers"
-      }
-      children {
-        node_id: 83
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 154
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 84
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 230
-        local_name: "__call__"
-      }
-      children {
-        node_id: 231
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 231
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 155
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 86
-        local_name: "variables"
-      }
-      children {
-        node_id: 156
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 157
-        local_name: "metrics"
-      }
-      children {
-        node_id: 158
-        local_name: "layers"
-      }
-      children {
-        node_id: 87
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 159
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 88
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 232
-        local_name: "__call__"
-      }
-      children {
-        node_id: 233
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 233
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 160
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 90
-        local_name: "variables"
-      }
-      children {
-        node_id: 161
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 162
-        local_name: "metrics"
-      }
-      children {
-        node_id: 163
-        local_name: "layers"
-      }
-      children {
-        node_id: 91
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 164
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 92
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 234
-        local_name: "__call__"
-      }
-      children {
-        node_id: 235
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 235
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 165
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 94
-        local_name: "variables"
-      }
-      children {
-        node_id: 166
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 167
-        local_name: "metrics"
-      }
-      children {
-        node_id: 168
-        local_name: "layers"
-      }
-      children {
-        node_id: 95
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 169
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 96
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 236
-        local_name: "__call__"
-      }
-      children {
-        node_id: 237
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 237
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 170
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 98
-        local_name: "variables"
-      }
-      children {
-        node_id: 171
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 172
-        local_name: "metrics"
-      }
-      children {
-        node_id: 173
-        local_name: "layers"
-      }
-      children {
-        node_id: 99
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 174
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 100
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 238
-        local_name: "__call__"
-      }
-      children {
-        node_id: 239
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 239
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 175
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 102
-        local_name: "variables"
-      }
-      children {
-        node_id: 176
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 177
-        local_name: "metrics"
-      }
-      children {
-        node_id: 178
-        local_name: "layers"
-      }
-      children {
-        node_id: 103
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 179
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 104
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 240
-        local_name: "__call__"
-      }
-      children {
-        node_id: 241
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 241
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 180
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 106
-        local_name: "variables"
-      }
-      children {
-        node_id: 181
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 182
-        local_name: "metrics"
-      }
-      children {
-        node_id: 183
-        local_name: "layers"
-      }
-      children {
-        node_id: 107
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 184
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 108
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 242
-        local_name: "__call__"
-      }
-      children {
-        node_id: 243
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 243
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 185
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 110
-        local_name: "variables"
-      }
-      children {
-        node_id: 186
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 187
-        local_name: "metrics"
-      }
-      children {
-        node_id: 188
-        local_name: "layers"
-      }
-      children {
-        node_id: 111
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 189
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 112
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 244
-        local_name: "__call__"
-      }
-      children {
-        node_id: 245
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 245
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 190
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 114
-        local_name: "variables"
-      }
-      children {
-        node_id: 191
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 192
-        local_name: "metrics"
-      }
-      children {
-        node_id: 193
-        local_name: "layers"
-      }
-      children {
-        node_id: 115
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 194
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 116
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 246
-        local_name: "__call__"
-      }
-      children {
-        node_id: 247
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 247
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 195
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 123
-        local_name: "variables"
-      }
-      children {
-        node_id: 196
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 197
-        local_name: "metrics"
-      }
-      children {
-        node_id: 198
-        local_name: "layers"
-      }
-      children {
-        node_id: 124
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 199
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 125
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 248
-        local_name: "__call__"
-      }
-      children {
-        node_id: 249
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 249
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 10
-        local_name: "0"
-      }
-      children {
-        node_id: 11
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 10
-        local_name: "0"
-      }
-      children {
-        node_id: 11
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 200
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 127
-        local_name: "variables"
-      }
-      children {
-        node_id: 201
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 202
-        local_name: "metrics"
-      }
-      children {
-        node_id: 203
-        local_name: "layers"
-      }
-      children {
-        node_id: 128
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 204
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 129
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 250
-        local_name: "__call__"
-      }
-      children {
-        node_id: 251
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 251
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 12
-        local_name: "0"
-      }
-      children {
-        node_id: 13
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 12
-        local_name: "0"
-      }
-      children {
-        node_id: 13
-        local_name: "1"
-      }
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      children {
-        node_id: 205
-        local_name: "layer_metrics"
-      }
-      children {
-        node_id: 131
-        local_name: "variables"
-      }
-      children {
-        node_id: 206
-        local_name: "layer_regularization_losses"
-      }
-      children {
-        node_id: 207
-        local_name: "metrics"
-      }
-      children {
-        node_id: 208
-        local_name: "layers"
-      }
-      children {
-        node_id: 132
-        local_name: "regularization_losses"
-      }
-      children {
-        node_id: 209
-        local_name: "non_trainable_variables"
-      }
-      children {
-        node_id: 133
-        local_name: "trainable_variables"
-      }
-      children {
-        node_id: 252
-        local_name: "__call__"
-      }
-      children {
-        node_id: 253
-        local_name: "call_and_return_all_conditional_losses"
-      }
-      children {
-        node_id: 253
-        local_name: "call_and_return_conditional_losses"
-      }
-      user_object {
-        identifier: "_generic_user_object"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_dict_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      user_object {
-        identifier: "trackable_list_wrapper"
-        version {
-          producer: 1
-          min_consumer: 1
-        }
-      }
-    }
-    nodes {
-      function {
-        concrete_functions: "__inference_polymorphic_action_fn_4619080"
-        concrete_functions: "__inference_polymorphic_action_fn_946"
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "time_step"
-                    }
-                    values {
-                      string_value: "policy_state"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  tuple_value {
-                    values {
-                      tuple_value {
-                      }
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        concrete_functions: "__inference_function_722"
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      bare_concrete_function {
-        concrete_function_name: "__inference_<lambda>_728"
-      }
-    }
-    nodes {
-      bare_concrete_function {
-        concrete_function_name: "__inference_signature_wrapper_4619026"
-        argument_keywords: "callee_basic_block_count"
-        argument_keywords: "callee_conditionally_executed_blocks"
-        argument_keywords: "callee_users"
-        argument_keywords: "caller_basic_block_count"
-        argument_keywords: "caller_conditionally_executed_blocks"
-        argument_keywords: "caller_users"
-        argument_keywords: "callsite_height"
-        argument_keywords: "cost_estimate"
-        argument_keywords: "discount"
-        argument_keywords: "edge_count"
-        argument_keywords: "inlining_default"
-        argument_keywords: "node_count"
-        argument_keywords: "nr_ctant_params"
-        argument_keywords: "reward"
-        argument_keywords: "step_type"
-      }
-    }
-    nodes {
-      bare_concrete_function {
-        concrete_function_name: "__inference_signature_wrapper_4619033"
-      }
-    }
-    nodes {
-      bare_concrete_function {
-        concrete_function_name: "__inference_signature_wrapper_4619048"
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "observation"
-                    }
-                    values {
-                      string_value: "step_type"
-                    }
-                    values {
-                      string_value: "network_state"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      tuple_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "observation"
-                    }
-                    values {
-                      string_value: "step_type"
-                    }
-                    values {
-                      string_value: "network_state"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      tuple_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "observation"
-                    }
-                    values {
-                      string_value: "step_type"
-                    }
-                    values {
-                      string_value: "network_state"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      tuple_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "observation"
-                    }
-                    values {
-                      string_value: "step_type"
-                    }
-                    values {
-                      string_value: "network_state"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      tuple_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                    values {
-                      string_value: "mask"
-                    }
-                    values {
-                      string_value: "training"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  list_value {
-                    values {
-                      none_value {
-                      }
-                    }
-                    values {
-                      bool_value: false
-                    }
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    nodes {
-      function {
-        function_spec {
-          fullargspec {
-            named_tuple_value {
-              name: "FullArgSpec"
-              values {
-                key: "args"
-                value {
-                  list_value {
-                    values {
-                      string_value: "self"
-                    }
-                    values {
-                      string_value: "inputs"
-                    }
-                  }
-                }
-              }
-              values {
-                key: "varargs"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "varkw"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "defaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlyargs"
-                value {
-                  list_value {
-                  }
-                }
-              }
-              values {
-                key: "kwonlydefaults"
-                value {
-                  none_value {
-                  }
-                }
-              }
-              values {
-                key: "annotations"
-                value {
-                  dict_value {
-                  }
-                }
-              }
-            }
-          }
-          is_method: true
-          input_signature {
-            none_value {
-            }
-          }
-        }
-      }
-    }
-    concrete_functions {
-      key: "__inference_<lambda>_728"
-      value {
-        bound_inputs: 4
-        canonicalized_input_signature {
-          tuple_value {
-            values {
-              tuple_value {
-              }
-            }
-            values {
-              dict_value {
-              }
-            }
-          }
-        }
-        output_signature {
-          tensor_spec_value {
-            shape {
-            }
-            dtype: DT_INT64
-          }
-        }
-      }
-    }
-    concrete_functions {
-      key: "__inference_function_722"
-      value {
-        canonicalized_input_signature {
-          tuple_value {
-            values {
-              tuple_value {
-              }
-            }
-            values {
-              dict_value {
-              }
-            }
-          }
-        }
-        output_signature {
-          tuple_value {
-          }
-        }
-      }
-    }
-    concrete_functions {
-      key: "__inference_polymorphic_action_fn_4619080"
-      value {
-        bound_inputs: 10
-        bound_inputs: 11
-        bound_inputs: 12
-        bound_inputs: 13
-        bound_inputs: 14
-        bound_inputs: 15
-        canonicalized_input_signature {
-          tuple_value {
-            values {
-              tuple_value {
-                values {
-                  named_tuple_value {
-                    name: "TimeStep"
-                    values {
-                      key: "step_type"
-                      value {
-                        tensor_spec_value {
-                          name: "time_step/step_type"
-                          shape {
-                            dim {
-                              size: 1
-                            }
-                          }
-                          dtype: DT_INT32
-                        }
-                      }
-                    }
-                    values {
-                      key: "reward"
-                      value {
-                        tensor_spec_value {
-                          name: "time_step/reward"
-                          shape {
-                            dim {
-                              size: 1
-                            }
-                          }
-                          dtype: DT_FLOAT
-                        }
-                      }
-                    }
-                    values {
-                      key: "discount"
-                      value {
-                        tensor_spec_value {
-                          name: "time_step/discount"
-                          shape {
-                            dim {
-                              size: 1
-                            }
-                          }
-                          dtype: DT_FLOAT
-                        }
-                      }
-                    }
-                    values {
-                      key: "observation"
-                      value {
-                        dict_value {
-                          fields {
-                            key: "callee_basic_block_count"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/callee_basic_block_count"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "callee_conditionally_executed_blocks"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/callee_conditionally_executed_blocks"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "callee_users"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/callee_users"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "caller_basic_block_count"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/caller_basic_block_count"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "caller_conditionally_executed_blocks"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/caller_conditionally_executed_blocks"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "caller_users"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/caller_users"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "callsite_height"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/callsite_height"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "cost_estimate"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/cost_estimate"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "edge_count"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/edge_count"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "inlining_default"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/inlining_default"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "node_count"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/node_count"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "nr_ctant_params"
-                            value {
-                              tensor_spec_value {
-                                name: "time_step/observation/nr_ctant_params"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                        }
-                      }
-                    }
-                  }
-                }
-                values {
-                  tuple_value {
-                  }
-                }
-              }
-            }
-            values {
-              dict_value {
-              }
-            }
-          }
-        }
-        output_signature {
-          named_tuple_value {
-            name: "PolicyStep"
-            values {
-              key: "action"
-              value {
-                tensor_spec_value {
-                  name: "action"
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                  dtype: DT_INT64
-                }
-              }
-            }
-            values {
-              key: "state"
-              value {
-                tuple_value {
-                }
-              }
-            }
-            values {
-              key: "info"
-              value {
-                tuple_value {
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    concrete_functions {
-      key: "__inference_polymorphic_action_fn_946"
-      value {
-        bound_inputs: 10
-        bound_inputs: 11
-        bound_inputs: 12
-        bound_inputs: 13
-        bound_inputs: 14
-        bound_inputs: 15
-        canonicalized_input_signature {
-          tuple_value {
-            values {
-              tuple_value {
-                values {
-                  named_tuple_value {
-                    name: "TimeStep"
-                    values {
-                      key: "step_type"
-                      value {
-                        tensor_spec_value {
-                          name: "step_type"
-                          shape {
-                            dim {
-                              size: 1
-                            }
-                          }
-                          dtype: DT_INT32
-                        }
-                      }
-                    }
-                    values {
-                      key: "reward"
-                      value {
-                        tensor_spec_value {
-                          name: "reward"
-                          shape {
-                            dim {
-                              size: 1
-                            }
-                          }
-                          dtype: DT_FLOAT
-                        }
-                      }
-                    }
-                    values {
-                      key: "discount"
-                      value {
-                        tensor_spec_value {
-                          name: "discount"
-                          shape {
-                            dim {
-                              size: 1
-                            }
-                          }
-                          dtype: DT_FLOAT
-                        }
-                      }
-                    }
-                    values {
-                      key: "observation"
-                      value {
-                        dict_value {
-                          fields {
-                            key: "callee_basic_block_count"
-                            value {
-                              tensor_spec_value {
-                                name: "callee_basic_block_count"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "callee_conditionally_executed_blocks"
-                            value {
-                              tensor_spec_value {
-                                name: "callee_conditionally_executed_blocks"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "callee_users"
-                            value {
-                              tensor_spec_value {
-                                name: "callee_users"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "caller_basic_block_count"
-                            value {
-                              tensor_spec_value {
-                                name: "caller_basic_block_count"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "caller_conditionally_executed_blocks"
-                            value {
-                              tensor_spec_value {
-                                name: "caller_conditionally_executed_blocks"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "caller_users"
-                            value {
-                              tensor_spec_value {
-                                name: "caller_users"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "callsite_height"
-                            value {
-                              tensor_spec_value {
-                                name: "callsite_height"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "cost_estimate"
-                            value {
-                              tensor_spec_value {
-                                name: "cost_estimate"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "edge_count"
-                            value {
-                              tensor_spec_value {
-                                name: "edge_count"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "inlining_default"
-                            value {
-                              tensor_spec_value {
-                                name: "inlining_default"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "node_count"
-                            value {
-                              tensor_spec_value {
-                                name: "node_count"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                          fields {
-                            key: "nr_ctant_params"
-                            value {
-                              tensor_spec_value {
-                                name: "nr_ctant_params"
-                                shape {
-                                  dim {
-                                    size: 1
-                                  }
-                                }
-                                dtype: DT_INT64
-                              }
-                            }
-                          }
-                        }
-                      }
-                    }
-                  }
-                }
-                values {
-                  tuple_value {
-                  }
-                }
-              }
-            }
-            values {
-              dict_value {
-              }
-            }
-          }
-        }
-        output_signature {
-          named_tuple_value {
-            name: "PolicyStep"
-            values {
-              key: "action"
-              value {
-                tensor_spec_value {
-                  name: "action"
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                  dtype: DT_INT64
-                }
-              }
-            }
-            values {
-              key: "state"
-              value {
-                tuple_value {
-                }
-              }
-            }
-            values {
-              key: "info"
-              value {
-                tuple_value {
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    concrete_functions {
-      key: "__inference_signature_wrapper_4619026"
-      value {
-        bound_inputs: 10
-        bound_inputs: 11
-        bound_inputs: 12
-        bound_inputs: 13
-        bound_inputs: 14
-        bound_inputs: 15
-        canonicalized_input_signature {
-          tuple_value {
-            values {
-              tuple_value {
-              }
-            }
-            values {
-              dict_value {
-                fields {
-                  key: "callee_basic_block_count"
-                  value {
-                    tensor_spec_value {
-                      name: "callee_basic_block_count"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "callee_conditionally_executed_blocks"
-                  value {
-                    tensor_spec_value {
-                      name: "callee_conditionally_executed_blocks"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "callee_users"
-                  value {
-                    tensor_spec_value {
-                      name: "callee_users"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "caller_basic_block_count"
-                  value {
-                    tensor_spec_value {
-                      name: "caller_basic_block_count"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "caller_conditionally_executed_blocks"
-                  value {
-                    tensor_spec_value {
-                      name: "caller_conditionally_executed_blocks"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "caller_users"
-                  value {
-                    tensor_spec_value {
-                      name: "caller_users"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "callsite_height"
-                  value {
-                    tensor_spec_value {
-                      name: "callsite_height"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "cost_estimate"
-                  value {
-                    tensor_spec_value {
-                      name: "cost_estimate"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "discount"
-                  value {
-                    tensor_spec_value {
-                      name: "discount"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_FLOAT
-                    }
-                  }
-                }
-                fields {
-                  key: "edge_count"
-                  value {
-                    tensor_spec_value {
-                      name: "edge_count"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "inlining_default"
-                  value {
-                    tensor_spec_value {
-                      name: "inlining_default"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "node_count"
-                  value {
-                    tensor_spec_value {
-                      name: "node_count"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "nr_ctant_params"
-                  value {
-                    tensor_spec_value {
-                      name: "nr_ctant_params"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT64
-                    }
-                  }
-                }
-                fields {
-                  key: "reward"
-                  value {
-                    tensor_spec_value {
-                      name: "reward"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_FLOAT
-                    }
-                  }
-                }
-                fields {
-                  key: "step_type"
-                  value {
-                    tensor_spec_value {
-                      name: "step_type"
-                      shape {
-                        dim {
-                          size: 1
-                        }
-                      }
-                      dtype: DT_INT32
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-        output_signature {
-          dict_value {
-            fields {
-              key: "inlining_decision"
-              value {
-                tensor_spec_value {
-                  name: "inlining_decision"
-                  shape {
-                    dim {
-                      size: 1
-                    }
-                  }
-                  dtype: DT_INT64
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-    concrete_functions {
-      key: "__inference_signature_wrapper_4619033"
-      value {
-        canonicalized_input_signature {
-          tuple_value {
-            values {
-              tuple_value {
-              }
-            }
-            values {
-              dict_value {
-              }
-            }
-          }
-        }
-        output_signature {
-          dict_value {
-          }
-        }
-      }
-    }
-    concrete_functions {
-      key: "__inference_signature_wrapper_4619048"
-      value {
-        bound_inputs: 4
-        canonicalized_input_signature {
-          tuple_value {
-            values {
-              tuple_value {
-              }
-            }
-            values {
-              dict_value {
-              }
-            }
-          }
-        }
-        output_signature {
-          dict_value {
-            fields {
-              key: "int64"
-              value {
-                tensor_spec_value {
-                  name: "int64"
-                  shape {
-                  }
-                  dtype: DT_INT64
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-}
-
diff --git a/src/llvm-project/llvm/lib/Analysis/models/inliner/variables/variables.data-00000-of-00001 b/src/llvm-project/llvm/lib/Analysis/models/inliner/variables/variables.data-00000-of-00001
deleted file mode 100644
index ee7d706..0000000
--- a/src/llvm-project/llvm/lib/Analysis/models/inliner/variables/variables.data-00000-of-00001
+++ /dev/null
Binary files differ
diff --git a/src/llvm-project/llvm/lib/Analysis/models/inliner/variables/variables.index b/src/llvm-project/llvm/lib/Analysis/models/inliner/variables/variables.index
deleted file mode 100644
index 7e0c10c..0000000
--- a/src/llvm-project/llvm/lib/Analysis/models/inliner/variables/variables.index
+++ /dev/null
Binary files differ
diff --git a/src/llvm-project/llvm/lib/Analysis/models/inlining/config.py b/src/llvm-project/llvm/lib/Analysis/models/inlining/config.py
new file mode 100644
index 0000000..78d3a82
--- /dev/null
+++ b/src/llvm-project/llvm/lib/Analysis/models/inlining/config.py
@@ -0,0 +1,87 @@
+"""Inlining Training config."""
+
+import tensorflow as tf
+
+POLICY_DECISION_LABEL = 'inlining_decision'
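+# JSON description of the policy's single output tensor: the logging name
+# plus the serving tensor's name, port, element type, and shape.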
+POLICY_OUTPUT_SPEC = """
+[
+    {
+        "logging_name": "inlining_decision",
+        "tensor_spec": {
+            "name": "StatefulPartitionedCall",
+            "port": 0,
+            "type": "int64_t",
+            "shape": [
+                1
+            ]
+        }
+    }
+]
+"""
+
+
+# pylint: disable=g-complex-comprehension
+def get_input_signature():
+  """Returns the list of features for LLVM inlining."""
+  # int64 features
+  inputs = [
+      tf.TensorSpec(dtype=tf.int64, shape=(), name=key) for key in [
+          'caller_basic_block_count',
+          'caller_conditionally_executed_blocks',
+          'caller_users',
+          'callee_basic_block_count',
+          'callee_conditionally_executed_blocks',
+          'callee_users',
+          'nr_ctant_params',
+          'node_count',
+          'edge_count',
+          'callsite_height',
+          'cost_estimate',
+          'inlining_default',
+          'sroa_savings',
+          'sroa_losses',
+          'load_elimination',
+          'call_penalty',
+          'call_argument_setup',
+          'load_relative_intrinsic',
+          'lowered_call_arg_setup',
+          'indirect_call_penalty',
+          'jump_table_penalty',
+          'case_cluster_penalty',
+          'switch_penalty',
+          'unsimplified_common_instructions',
+          'num_loops',
+          'dead_blocks',
+          'simplified_instructions',
+          'constant_args',
+          'constant_offset_ptr_args',
+          'callsite_cost',
+          'cold_cc_penalty',
+          'last_call_to_static_bonus',
+          'is_multiple_blocks',
+          'nested_inlines',
+          'nested_inline_cost_estimate',
+          'threshold',
+      ]
+  ]
+
+  # float32 features
+  inputs.extend([
+      tf.TensorSpec(dtype=tf.float32, shape=(), name=key)
+      for key in ['discount', 'reward']
+  ])
+
+  # int32 features
+  inputs.extend([
+      tf.TensorSpec(dtype=tf.int32, shape=(), name=key)
+      for key in ['step_type']
+  ])
+  return inputs
+
+
+def get_output_signature():
+  return POLICY_DECISION_LABEL
+
+
+def get_output_spec():
+  return POLICY_OUTPUT_SPEC
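
A minimal usage sketch for the module added above, assuming it is importable
as "config"; the zero-valued feed and the "feed" name are illustrative only,
not part of the actual training pipeline:

    import tensorflow as tf

    import config  # the module defined above, assumed on the import path

    # One zero-valued scalar tensor per declared feature, keyed by name.
    specs = config.get_input_signature()
    feed = {spec.name: tf.zeros(spec.shape, dtype=spec.dtype) for spec in specs}

    assert len(feed) == 39  # 36 int64 + 2 float32 + 1 int32 features
    assert config.get_output_signature() == 'inlining_decision'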